| patch | y | oldf | idx | id | msg | proj | lang |
|---|---|---|---|---|---|---|---|
| stringlengths 17–31.2k | int64 1–1 | stringlengths 0–2.21M | int64 1–1 | int64 4.29k–68.4k | stringlengths 8–843 | stringclasses (212 values) | stringclasses (9 values) |
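Each row pairs a unified diff (`patch`) and the pre-change file contents (`oldf`) with an integer label `y` (uniformly 1 per the column stats above), row identifiers `idx` and `id`, the reviewer's comment (`msg`), and the source repository (`proj`) and language tag (`lang`). A minimal loading sketch with the Hugging Face `datasets` library; the hub id `org/code-review-data` is a placeholder, not this dataset's actual location:

```python
# Minimal loading sketch. "org/code-review-data" is a placeholder hub id,
# not this dataset's real location.
from datasets import load_dataset

ds = load_dataset("org/code-review-data", split="train")
row = ds[0]
print(row["proj"], row["lang"], row["id"])  # e.g. NethermindEth-nethermind .cs 22440
print(row["msg"])                           # the reviewer's comment
print(row["patch"].splitlines()[0])         # the hunk header of the diff
```

Three excerpted rows follow.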
@@ -61,5 +61,6 @@ namespace Nethermind.Core.Specs.Forks
public bool IsEip1052Enabled => true;
public bool IsEip1283Enabled => false;
public bool IsEip1234Enabled => true;
+ public bool IsEip1344Enabled => false;
}
}
| 1 |
/*
* Copyright (c) 2018 Demerzel Solutions Limited
* This file is part of the Nethermind library.
*
* The Nethermind library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The Nethermind library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
*/
using System.Threading;
using Nethermind.Dirichlet.Numerics;
namespace Nethermind.Core.Specs.Forks
{
public class ConstantinopleFix : IReleaseSpec
{
private static IReleaseSpec _instance;
private ConstantinopleFix()
{
}
public static IReleaseSpec Instance => LazyInitializer.EnsureInitialized(ref _instance, () => new ConstantinopleFix());
public long MaximumExtraDataSize => 32;
public long MaxCodeSize => 24576;
public long MinGasLimit => 5000;
public long GasLimitBoundDivisor => 0x0400;
public Address Registrar => new Address("0xe3389675d0338462dC76C6f9A3e432550c36A142");
public UInt256 BlockReward => UInt256.Parse("2000000000000000000");
public long DifficultyBombDelay => 5000000L;
public long DifficultyBoundDivisor => 0x0800;
public bool IsTimeAdjustmentPostOlympic => true;
public bool IsEip2Enabled => true;
public bool IsEip7Enabled => true;
public bool IsEip100Enabled => true;
public bool IsEip140Enabled => true;
public bool IsEip150Enabled => true;
public bool IsEip155Enabled => true;
public bool IsEip158Enabled => true;
public bool IsEip160Enabled => true;
public bool IsEip170Enabled => true;
public bool IsEip196Enabled => true;
public bool IsEip197Enabled => true;
public bool IsEip198Enabled => true;
public bool IsEip211Enabled => true;
public bool IsEip214Enabled => true;
public bool IsEip649Enabled => true;
public bool IsEip658Enabled => true;
public bool IsEip145Enabled => true;
public bool IsEip1014Enabled => true;
public bool IsEip1052Enabled => true;
public bool IsEip1283Enabled => false;
public bool IsEip1234Enabled => true;
}
}
| 1 | 22,440 | Can we add Istanbul with this first EIP please? (or as a separate issue) | NethermindEth-nethermind | .cs
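In this first row, the patch adds a disabled `IsEip1344Enabled` flag to Nethermind's Constantinople-fix release spec, and the reviewer asks for an Istanbul fork definition to carry this first Istanbul EIP. To collect every comment of this kind for one project, a filtering sketch over the `ds` object from the loading example above (column names as in the schema table; the project and language values are taken from this row):

```python
# Sketch: pull C# review comments from the Nethermind repository.
# `ds` is the Dataset loaded in the earlier example.
nethermind_cs = ds.filter(
    lambda r: r["proj"] == "NethermindEth-nethermind" and r["lang"] == ".cs"
)
for r in nethermind_cs.select(range(min(3, len(nethermind_cs)))):
    print(r["id"], "-", r["msg"])
```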
@@ -71,10 +71,10 @@ func (sn Snapshot) ID() backend.ID {
func (sn *Snapshot) fillUserInfo() error {
usr, err := user.Current()
if err != nil {
- return err
+ return nil
}
-
sn.Username = usr.Username
+
uid, err := strconv.ParseInt(usr.Uid, 10, 32)
if err != nil {
return err
| 1 |
package restic
import (
"fmt"
"os"
"os/user"
"path/filepath"
"strconv"
"time"
"github.com/restic/restic/backend"
"github.com/restic/restic/repository"
)
type Snapshot struct {
Time time.Time `json:"time"`
Parent backend.ID `json:"parent,omitempty"`
Tree backend.ID `json:"tree"`
Paths []string `json:"paths"`
Hostname string `json:"hostname,omitempty"`
Username string `json:"username,omitempty"`
UID uint32 `json:"uid,omitempty"`
GID uint32 `json:"gid,omitempty"`
id backend.ID // plaintext ID, used during restore
}
func NewSnapshot(paths []string) (*Snapshot, error) {
for i, path := range paths {
if p, err := filepath.Abs(path); err != nil {
paths[i] = p
}
}
sn := &Snapshot{
Paths: paths,
Time: time.Now(),
}
hn, err := os.Hostname()
if err == nil {
sn.Hostname = hn
}
err = sn.fillUserInfo()
if err != nil {
return nil, err
}
return sn, nil
}
func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error) {
sn := &Snapshot{id: id}
err := repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
if err != nil {
return nil, err
}
return sn, nil
}
func (sn Snapshot) String() string {
return fmt.Sprintf("<Snapshot of %v at %s>", sn.Paths, sn.Time)
}
func (sn Snapshot) ID() backend.ID {
return sn.id
}
func (sn *Snapshot) fillUserInfo() error {
usr, err := user.Current()
if err != nil {
return err
}
sn.Username = usr.Username
uid, err := strconv.ParseInt(usr.Uid, 10, 32)
if err != nil {
return err
}
sn.UID = uint32(uid)
gid, err := strconv.ParseInt(usr.Gid, 10, 32)
if err != nil {
return err
}
sn.GID = uint32(gid)
return nil
}
| 1 | 6,622 | I don't understand this change | restic-restic | go |
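The comment on this second row ("I don't understand this change") points at a real semantic shift: after the patch, a failing `user.Current()` makes `fillUserInfo` return `nil` instead of the error, so snapshot creation silently proceeds without username, UID, and GID. When inspecting rows like this one, the hunk header ties the `patch` back to a line range in `oldf`; a standalone parsing sketch, assuming single-hunk patches like the rows shown here:

```python
import re

# Unified-diff hunk header: "@@ -start,len +start,len @@ ..."
HUNK_RE = re.compile(r"^@@ -(\d+),(\d+) \+(\d+),(\d+) @@", re.MULTILINE)

def old_file_span(patch: str):
    """Return the 1-based inclusive line range a hunk covers in oldf."""
    m = HUNK_RE.search(patch)
    if m is None:
        return None
    start, length = int(m.group(1)), int(m.group(2))
    return start, start + length - 1

# The restic row's header "@@ -71,10 +71,10 @@ ..." maps to lines 71-80 of oldf.
print(old_file_span("@@ -71,10 +71,10 @@ func (sn Snapshot) ID() backend.ID {"))
```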
@@ -2248,7 +2248,7 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
- self.show_warning(str(e))
+ self.show_warning("Invalid Public key")
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
| 1 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum import keystore, simple_config, ecc
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum import constants
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate)
from electrum import Transaction
from electrum import util, bitcoin, commands, coinchooser
from electrum import paymentrequest
from electrum.wallet import Multisig_Wallet, AddTransactionException
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 5)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
# todo: update only unconfirmed tx
self.history_list.update()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in macOS using this as work around
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Bitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Bitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast_transaction(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error(_('Cannot find payment request in wallet.'))
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.get_transaction(txid)
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
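# For reference (assuming this build's base_units_list): decimal_point is
# 8 for BTC, 5 for mBTC, 2 for bits and 0 for sat, matching the
# 1 BTC = 1000 mBTC = 10^6 bits chain described in the help text above.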
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
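# Worked example with assumed numbers: fee_per_kb = 5000 sat/kB and
# total_size = 400 bytes give a proposed fee of 5000 * 400 / 1000 = 2000 sat.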
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
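# Default suggestion: open the dialog at 1.5x the original fee; moving the
# slider below replaces this with fee_rate * tx_size / 1000.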
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
| 1 | 12,650 | Let's allow translators for this string to be translated: `self.show_warning(_('Invalid Public key'))` | spesmilo-electrum | py |
@@ -0,0 +1,13 @@
+var landmarks = axe.commons.aria.getRolesByType('landmark');
+var parent = axe.commons.dom.getComposedParent(node);
+while (parent){
+ var role = parent.getAttribute('role');
+ if (!role && (parent.tagName.toLowerCase() !== 'form')){
+ role = axe.commons.aria.implicitRole(parent);
+ }
+ if (role && landmarks.includes(role)){
+ return false;
+ }
+ parent = axe.commons.dom.getComposedParent(parent);
+}
+return true;

| y: 1 | oldf: (empty) | idx: 1 | id: 11,447 | msg: This needs to work with `role=form` too. | proj: dequelabs-axe-core | lang: js |
|
@@ -1039,6 +1039,11 @@ Blockly.BlockSvg.prototype.handleDragFree_ = function(oldXY, newXY, e) {
}
}
+ // Always update previews for output connections.
+ if (localConnection && localConnection.type == Blockly.OUTPUT_VALUE) {
+ updatePreviews = true;
+ }
+
if (updatePreviews) {
var candidateIsLast = (localConnection == lastOnStack);
this.updatePreviews(closestConnection, localConnection, radiusConnection,

| y: 1 | oldf follows below |

/**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Methods for graphically rendering a block as SVG.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.BlockSvg');
goog.require('Blockly.Block');
goog.require('Blockly.ContextMenu');
goog.require('Blockly.RenderedConnection');
goog.require('goog.Timer');
goog.require('goog.asserts');
goog.require('goog.dom');
goog.require('goog.math.Coordinate');
goog.require('goog.userAgent');
/**
* Class for a block's SVG representation.
* Not normally called directly, workspace.newBlock() is preferred.
* @param {!Blockly.Workspace} workspace The block's workspace.
* @param {?string} prototypeName Name of the language object containing
* type-specific functions for this block.
* @param {string=} opt_id Optional ID. Use this ID if provided, otherwise
* create a new id.
* @extends {Blockly.Block}
* @constructor
*/
Blockly.BlockSvg = function(workspace, prototypeName, opt_id) {
// Create core elements for the block.
/**
* @type {SVGElement}
* @private
*/
this.svgGroup_ = Blockly.createSvgElement('g', {}, null);
/** @type {SVGElement} */
this.svgPath_ = Blockly.createSvgElement('path', {'class': 'blocklyPath'},
this.svgGroup_);
this.svgPath_.tooltip = this;
/** @type {boolean} */
this.rendered = false;
Blockly.Tooltip.bindMouseEvents(this.svgPath_);
Blockly.BlockSvg.superClass_.constructor.call(this,
workspace, prototypeName, opt_id);
};
goog.inherits(Blockly.BlockSvg, Blockly.Block);
/**
* Height of this block, not including any statement blocks above or below.
* @type {number}
*/
Blockly.BlockSvg.prototype.height = 0;
/**
* Width of this block, including any connected value blocks.
* @type {number}
*/
Blockly.BlockSvg.prototype.width = 0;
/**
* Opacity of this block between 0 and 1.
* @type {number}
* @private
*/
Blockly.BlockSvg.prototype.opacity_ = 1;
/**
* Original location of block being dragged.
* @type {goog.math.Coordinate}
* @private
*/
Blockly.BlockSvg.prototype.dragStartXY_ = null;
/**
* Whether the block glows as if running.
* @type {boolean}
* @private
*/
Blockly.BlockSvg.prototype.isGlowingBlock_ = false;
/**
* Whether the block's whole stack glows as if running.
* @type {boolean}
* @private
*/
Blockly.BlockSvg.prototype.isGlowingStack_ = false;
/**
* Constant for identifying rows that are to be rendered inline.
* Don't collide with Blockly.INPUT_VALUE and friends.
* @const
*/
Blockly.BlockSvg.INLINE = -1;
/**
* Create and initialize the SVG representation of the block.
* May be called more than once.
*/
Blockly.BlockSvg.prototype.initSvg = function() {
goog.asserts.assert(this.workspace.rendered, 'Workspace is headless.');
// Input shapes are empty holes drawn when a value input is not connected.
this.inputShapes_ = {};
for (var i = 0, input; input = this.inputList[i]; i++) {
input.init();
if (input.type === Blockly.INPUT_VALUE) {
this.initInputShape(input);
}
}
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].createIcon();
}
this.updateColour();
this.updateMovable();
if (!this.workspace.options.readOnly && !this.eventsInit_) {
Blockly.bindEvent_(this.getSvgRoot(), 'mousedown', this,
this.onMouseDown_);
var thisBlock = this;
Blockly.bindEvent_(this.getSvgRoot(), 'touchstart', null,
function(e) {Blockly.longStart_(e, thisBlock);});
}
this.eventsInit_ = true;
if (!this.getSvgRoot().parentNode) {
this.workspace.getCanvas().appendChild(this.getSvgRoot());
}
};
/**
* Create and initialize the SVG element for an input shape.
* @param {!Blockly.Input} input Value input to add a shape SVG element for.
*/
Blockly.BlockSvg.prototype.initInputShape = function(input) {
this.inputShapes_[input.name] = Blockly.createSvgElement(
'path',
{
'class': 'blocklyPath',
'style': 'visibility: hidden' // Hide by default - shown when not connected.
},
this.svgGroup_
);
};
/**
* Select this block. Highlight it visually.
*/
Blockly.BlockSvg.prototype.select = function() {
if (this.isShadow() && this.getParent()) {
// Shadow blocks should not be selected.
this.getParent().select();
return;
}
if (Blockly.selected == this) {
return;
}
var oldId = null;
if (Blockly.selected) {
oldId = Blockly.selected.id;
// Unselect any previously selected block.
Blockly.Events.disable();
Blockly.selected.unselect();
Blockly.Events.enable();
}
var event = new Blockly.Events.Ui(null, 'selected', oldId, this.id);
event.workspaceId = this.workspace.id;
Blockly.Events.fire(event);
Blockly.selected = this;
this.addSelect();
};
/**
* Unselect this block. Remove its highlighting.
*/
Blockly.BlockSvg.prototype.unselect = function() {
if (Blockly.selected != this) {
return;
}
var event = new Blockly.Events.Ui(null, 'selected', this.id, null);
event.workspaceId = this.workspace.id;
Blockly.Events.fire(event);
Blockly.selected = null;
this.removeSelect();
};
/**
* Glow only this particular block, to highlight it visually as if it's running.
* @param {boolean} isGlowingBlock Whether the block should glow.
*/
Blockly.BlockSvg.prototype.setGlowBlock = function(isGlowingBlock) {
this.isGlowingBlock_ = isGlowingBlock;
this.updateColour();
};
/**
* Glow the stack starting with this block, to highlight it visually as if it's running.
* @param {boolean} isGlowingStack Whether the stack starting with this block should glow.
*/
Blockly.BlockSvg.prototype.setGlowStack = function(isGlowingStack) {
this.isGlowingStack_ = isGlowingStack;
// Update the applied SVG filter if the property has changed
var svg = this.getSvgRoot();
if (this.isGlowingStack_ && !svg.hasAttribute('filter')) {
svg.setAttribute('filter', 'url(#blocklyStackGlowFilter)');
} else if (!this.isGlowingStack_ && svg.hasAttribute('filter')) {
svg.removeAttribute('filter');
}
};
/**
* Block's mutator icon (if any).
* @type {Blockly.Mutator}
*/
Blockly.BlockSvg.prototype.mutator = null;
/**
* Block's comment icon (if any).
* @type {Blockly.Comment}
*/
Blockly.BlockSvg.prototype.comment = null;
/**
* Block's warning icon (if any).
* @type {Blockly.Warning}
*/
Blockly.BlockSvg.prototype.warning = null;
/**
* Returns a list of mutator, comment, and warning icons.
* @return {!Array} List of icons.
*/
Blockly.BlockSvg.prototype.getIcons = function() {
var icons = [];
if (this.mutator) {
icons.push(this.mutator);
}
if (this.comment) {
icons.push(this.comment);
}
if (this.warning) {
icons.push(this.warning);
}
return icons;
};
/**
* Wrapper function called when a mouseUp occurs during a drag operation.
* @type {Array.<!Array>}
* @private
*/
Blockly.BlockSvg.onMouseUpWrapper_ = null;
/**
* Wrapper function called when a mouseMove occurs during a drag operation.
* @type {Array.<!Array>}
* @private
*/
Blockly.BlockSvg.onMouseMoveWrapper_ = null;
/**
* Stop binding to the global mouseup and mousemove events.
* @private
*/
Blockly.BlockSvg.terminateDrag_ = function() {
if (Blockly.BlockSvg.onMouseUpWrapper_) {
Blockly.unbindEvent_(Blockly.BlockSvg.onMouseUpWrapper_);
Blockly.BlockSvg.onMouseUpWrapper_ = null;
}
if (Blockly.BlockSvg.onMouseMoveWrapper_) {
Blockly.unbindEvent_(Blockly.BlockSvg.onMouseMoveWrapper_);
Blockly.BlockSvg.onMouseMoveWrapper_ = null;
}
var selected = Blockly.selected;
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
// Terminate a drag operation.
if (selected) {
if (Blockly.replacementMarker_) {
Blockly.BlockSvg.removeReplacementMarker();
} else if (Blockly.insertionMarker_) {
Blockly.Events.disable();
if (Blockly.insertionMarkerConnection_) {
Blockly.BlockSvg.disconnectInsertionMarker();
}
Blockly.insertionMarker_.dispose();
Blockly.insertionMarker_ = null;
Blockly.Events.enable();
}
// Update the connection locations.
var xy = selected.getRelativeToSurfaceXY();
var dxy = goog.math.Coordinate.difference(xy, selected.dragStartXY_);
var event = new Blockly.Events.Move(selected);
event.oldCoordinate = selected.dragStartXY_;
event.recordNew();
Blockly.Events.fire(event);
selected.moveConnections_(dxy.x, dxy.y);
delete selected.draggedBubbles_;
selected.setDragging_(false);
selected.moveOffDragSurface_();
selected.render();
      // Ensure that any snap and bump are part of this move's event group.
var group = Blockly.Events.getGroup();
setTimeout(function() {
Blockly.Events.setGroup(group);
selected.snapToGrid();
Blockly.Events.setGroup(false);
}, Blockly.BUMP_DELAY / 2);
setTimeout(function() {
Blockly.Events.setGroup(group);
selected.bumpNeighbours_();
Blockly.Events.setGroup(false);
}, Blockly.BUMP_DELAY);
// Fire an event to allow scrollbars to resize.
      Blockly.asyncSvgResize(selected.workspace);
}
}
Blockly.dragMode_ = Blockly.DRAG_NONE;
Blockly.Css.setCursor(Blockly.Css.Cursor.OPEN);
};
/**
* Set parent of this block to be a new block or null.
* @param {Blockly.BlockSvg} newParent New parent block.
*/
Blockly.BlockSvg.prototype.setParent = function(newParent) {
if (newParent == this.parentBlock_) {
return;
}
var svgRoot = this.getSvgRoot();
if (this.parentBlock_ && svgRoot) {
// Move this block up the DOM. Keep track of x/y translations.
var xy = this.getRelativeToSurfaceXY();
// Avoid moving a block up the DOM if it's currently selected/dragging,
// so as to avoid taking things off the drag surface.
if (Blockly.selected != this) {
this.workspace.getCanvas().appendChild(svgRoot);
this.translate(xy.x, xy.y);
}
}
Blockly.Field.startCache();
Blockly.BlockSvg.superClass_.setParent.call(this, newParent);
Blockly.Field.stopCache();
if (newParent) {
var oldXY = this.getRelativeToSurfaceXY();
newParent.getSvgRoot().appendChild(svgRoot);
var newXY = this.getRelativeToSurfaceXY();
// Move the connections to match the child's new position.
this.moveConnections_(newXY.x - oldXY.x, newXY.y - oldXY.y);
// If we are a shadow block, inherit tertiary colour.
if (this.isShadow()) {
this.setColour(this.getColour(), this.getColourSecondary(),
newParent.getColourTertiary());
}
}
};
/**
* Return the coordinates of the top-left corner of this block relative to the
* drawing surface's origin (0,0).
* @return {!goog.math.Coordinate} Object with .x and .y properties.
*/
Blockly.BlockSvg.prototype.getRelativeToSurfaceXY = function() {
// The drawing surface is relative to either the workspace canvas
// or to the drag surface group.
var x = 0;
var y = 0;
var dragSurfaceGroup = (this.workspace.dragSurface) ?
this.workspace.dragSurface.getGroup() : null;
var element = this.getSvgRoot();
if (element) {
do {
// Loop through this block and every parent.
var xy = Blockly.getRelativeXY_(element);
x += xy.x;
y += xy.y;
// If this element is the current element on the drag surface, include
// the translation of the drag surface itself.
if (this.workspace.dragSurface &&
this.workspace.dragSurface.getCurrentBlock() == element) {
var surfaceTranslation = this.workspace.dragSurface.getSurfaceTranslation();
x += surfaceTranslation.x;
y += surfaceTranslation.y;
}
element = element.parentNode;
} while (element && element != this.workspace.getCanvas() &&
element != dragSurfaceGroup);
}
return new goog.math.Coordinate(x, y);
};
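// A concrete walk-through of the loop above, with invented numbers: a block
// translated (10, 5) inside a parent group at (100, 50) accumulates to
// (110, 55); if that parent is also the element currently on the drag surface
// and the surface itself is translated by (3, 4), the surface offset is added
// too, for a final (113, 59) relative to the workspace origin.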
/**
* Move a block by a relative offset.
* @param {number} dx Horizontal offset.
* @param {number} dy Vertical offset.
*/
Blockly.BlockSvg.prototype.moveBy = function(dx, dy) {
goog.asserts.assert(!this.parentBlock_, 'Block has parent.');
var eventsEnabled = Blockly.Events.isEnabled();
if (eventsEnabled) {
var event = new Blockly.Events.Move(this);
}
var xy = this.getRelativeToSurfaceXY();
this.translate(xy.x + dx, xy.y + dy);
this.moveConnections_(dx, dy);
if (eventsEnabled) {
event.recordNew();
Blockly.Events.fire(event);
}
};
/**
* Set this block to an absolute translation.
* @param {number} x Horizontal translation.
* @param {number} y Vertical translation.
* @param {boolean=} opt_use3d If set, use 3d translation.
*/
Blockly.BlockSvg.prototype.translate = function(x, y, opt_use3d) {
if (opt_use3d) {
this.getSvgRoot().setAttribute('style', 'transform: translate3d(' + x + 'px,' + y + 'px, 0px)');
} else {
this.getSvgRoot().setAttribute('transform', 'translate(' + x + ',' + y + ')');
}
};
/**
* Snap this block to the nearest grid point.
*/
Blockly.BlockSvg.prototype.snapToGrid = function() {
if (!this.workspace) {
return; // Deleted block.
}
if (Blockly.dragMode_ != Blockly.DRAG_NONE) {
return; // Don't bump blocks during a drag.
}
if (this.getParent()) {
return; // Only snap top-level blocks.
}
if (this.isInFlyout) {
return; // Don't move blocks around in a flyout.
}
if (!this.workspace.options.gridOptions ||
!this.workspace.options.gridOptions['snap']) {
return; // Config says no snapping.
}
var spacing = this.workspace.options.gridOptions['spacing'];
var half = spacing / 2;
var xy = this.getRelativeToSurfaceXY();
var dx = Math.round((xy.x - half) / spacing) * spacing + half - xy.x;
var dy = Math.round((xy.y - half) / spacing) * spacing + half - xy.y;
dx = Math.round(dx);
dy = Math.round(dy);
if (dx != 0 || dy != 0) {
this.moveBy(dx, dy);
}
};
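// Worked example of the rounding above (values invented): with grid spacing
// 40 and thus half = 20, grid points sit at x = 20, 60, 100, ... A block at
// xy.x = 73 gives dx = Math.round((73 - 20) / 40) * 40 + 20 - 73 = -13, so
// moveBy() shifts it to x = 60, the nearest half-offset grid point.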
/**
* Returns the coordinates of a bounding box describing the dimensions of this
* block and any blocks stacked below it.
* @return {!{topLeft: goog.math.Coordinate, bottomRight: goog.math.Coordinate}}
* Object with top left and bottom right coordinates of the bounding box.
*/
Blockly.BlockSvg.prototype.getBoundingRectangle = function() {
var blockXY = this.getRelativeToSurfaceXY(this);
var blockBounds = this.getHeightWidth();
var topLeft;
var bottomRight;
if (this.RTL) {
topLeft = new goog.math.Coordinate(blockXY.x - blockBounds.width,
blockXY.y);
bottomRight = new goog.math.Coordinate(blockXY.x,
blockXY.y + blockBounds.height);
} else {
topLeft = new goog.math.Coordinate(blockXY.x, blockXY.y);
bottomRight = new goog.math.Coordinate(blockXY.x + blockBounds.width,
blockXY.y + blockBounds.height);
}
return {topLeft: topLeft, bottomRight: bottomRight};
};
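// Example of the RTL branch above (invented numbers): an 80x40 block anchored
// at (200, 100) has LTR bounds topLeft (200, 100) / bottomRight (280, 140);
// in RTL the anchor is the right edge, so the same block spans
// topLeft (120, 100) / bottomRight (200, 140).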
/**
* Set block opacity for SVG rendering.
 * @param {number} opacity Intended opacity, between 0 and 1
*/
Blockly.BlockSvg.prototype.setOpacity = function(opacity) {
this.opacity_ = opacity;
if (this.rendered) {
this.updateColour();
}
};
/**
* Get block opacity for SVG rendering.
 * @return {number} Intended opacity, between 0 and 1
*/
Blockly.BlockSvg.prototype.getOpacity = function() {
return this.opacity_;
};
/**
* Set whether the block is collapsed or not.
* @param {boolean} collapsed True if collapsed.
*/
Blockly.BlockSvg.prototype.setCollapsed = function(collapsed) {
if (this.collapsed_ == collapsed) {
return;
}
var renderList = [];
// Show/hide the inputs.
for (var i = 0, input; input = this.inputList[i]; i++) {
renderList.push.apply(renderList, input.setVisible(!collapsed));
}
var COLLAPSED_INPUT_NAME = '_TEMP_COLLAPSED_INPUT';
if (collapsed) {
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].setVisible(false);
}
var text = this.toString(Blockly.COLLAPSE_CHARS);
this.appendDummyInput(COLLAPSED_INPUT_NAME).appendField(text).init();
} else {
this.removeInput(COLLAPSED_INPUT_NAME);
// Clear any warnings inherited from enclosed blocks.
this.setWarningText(null);
}
Blockly.BlockSvg.superClass_.setCollapsed.call(this, collapsed);
if (!renderList.length) {
// No child blocks, just render this block.
renderList[0] = this;
}
if (this.rendered) {
for (var i = 0, block; block = renderList[i]; i++) {
block.render();
}
// Don't bump neighbours.
// Although bumping neighbours would make sense, users often collapse
// all their functions and store them next to each other. Expanding and
// bumping causes all their definitions to go out of alignment.
}
};
/**
* Open the next (or previous) FieldTextInput.
* @param {Blockly.Field|Blockly.Block} start Current location.
* @param {boolean} forward If true go forward, otherwise backward.
*/
Blockly.BlockSvg.prototype.tab = function(start, forward) {
// This function need not be efficient since it runs once on a keypress.
// Create an ordered list of all text fields and connected inputs.
var list = [];
for (var i = 0, input; input = this.inputList[i]; i++) {
for (var j = 0, field; field = input.fieldRow[j]; j++) {
if (field instanceof Blockly.FieldTextInput) {
// TODO: Also support dropdown fields.
list.push(field);
}
}
if (input.connection) {
var block = input.connection.targetBlock();
if (block) {
list.push(block);
}
}
}
i = list.indexOf(start);
if (i == -1) {
// No start location, start at the beginning or end.
i = forward ? -1 : list.length;
}
var target = list[forward ? i + 1 : i - 1];
if (!target) {
// Ran off of list.
var parent = this.getParent();
if (parent) {
parent.tab(this, forward);
}
} else if (target instanceof Blockly.Field) {
target.showEditor_();
} else {
target.tab(null, forward);
}
};
/**
* Handle a mouse-down on an SVG block.
* @param {!Event} e Mouse down event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseDown_ = function(e) {
if (this.workspace.options.readOnly) {
return;
}
if (this.isInFlyout) {
e.stopPropagation();
return;
}
this.workspace.markFocused();
// Update Blockly's knowledge of its own location.
Blockly.svgResize(this.workspace);
Blockly.terminateDrag_();
this.select();
Blockly.hideChaff();
this.workspace.recordDeleteAreas();
if (Blockly.isRightButton(e)) {
// Right-click.
this.showContextMenu_(e);
} else if (!this.isMovable()) {
// Allow immovable blocks to be selected and context menued, but not
// dragged. Let this event bubble up to document, so the workspace may be
// dragged instead.
return;
} else {
if (!Blockly.Events.getGroup()) {
Blockly.Events.setGroup(true);
}
// Left-click (or middle click)
Blockly.Css.setCursor(Blockly.Css.Cursor.CLOSED);
this.dragStartXY_ = this.getRelativeToSurfaceXY();
this.workspace.startDrag(e, this.dragStartXY_);
Blockly.dragMode_ = Blockly.DRAG_STICKY;
Blockly.BlockSvg.onMouseUpWrapper_ = Blockly.bindEvent_(document,
'mouseup', this, this.onMouseUp_);
Blockly.BlockSvg.onMouseMoveWrapper_ = Blockly.bindEvent_(document,
'mousemove', this, this.onMouseMove_);
// Build a list of bubbles that need to be moved and where they started.
this.draggedBubbles_ = [];
var descendants = this.getDescendants();
for (var i = 0, descendant; descendant = descendants[i]; i++) {
var icons = descendant.getIcons();
for (var j = 0; j < icons.length; j++) {
var data = icons[j].getIconLocation();
data.bubble = icons[j];
this.draggedBubbles_.push(data);
}
}
}
// This event has been handled. No need to bubble up to the document.
e.stopPropagation();
e.preventDefault();
};
/**
* Handle a mouse-up anywhere in the SVG pane. Is only registered when a
* block is clicked. We can't use mouseUp on the block since a fast-moving
* cursor can briefly escape the block before it catches up.
* @param {!Event} e Mouse up event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseUp_ = function(e) {
var isNotShadowBlock = this.ioClickHackIsNotShadow_(e);
if (Blockly.dragMode_ != Blockly.DRAG_FREE && !Blockly.WidgetDiv.isVisible() && isNotShadowBlock) {
Blockly.Events.fire(
new Blockly.Events.Ui(this, 'click', undefined, undefined));
// Scratch-specific: also fire a "stack click" event for this stack.
// This is used to toggle the stack when any block in the stack is clicked.
var rootBlock = this.workspace.getBlockById(this.id).getRootBlock();
Blockly.Events.fire(
new Blockly.Events.Ui(rootBlock, 'stackclick', undefined, undefined));
}
Blockly.terminateDrag_();
if (Blockly.selected && Blockly.highlightedConnection_) {
this.positionNewBlock(Blockly.selected,
Blockly.localConnection_, Blockly.highlightedConnection_);
// Connect two blocks together.
Blockly.localConnection_.connect(Blockly.highlightedConnection_);
if (this.rendered) {
// Trigger a connection animation.
// Determine which connection is inferior (lower in the source stack).
var inferiorConnection = Blockly.localConnection_.isSuperior() ?
Blockly.highlightedConnection_ : Blockly.localConnection_;
inferiorConnection.getSourceBlock().connectionUiEffect();
}
if (this.workspace.trashcan) {
// Don't throw an object in the trash can if it just got connected.
this.workspace.trashcan.close();
}
} else if (!this.getParent() && Blockly.selected.isDeletable() &&
this.workspace.isDeleteArea(e)) {
var trashcan = this.workspace.trashcan;
if (trashcan) {
goog.Timer.callOnce(trashcan.close, 100, trashcan);
}
Blockly.selected.dispose(false, true);
// Dropping a block on the trash can will usually cause the workspace to
// resize to contain the newly positioned block. Force a second resize
// now that the block has been deleted.
Blockly.asyncSvgResize(this.workspace);
}
if (Blockly.highlightedConnection_) {
Blockly.highlightedConnection_ = null;
}
Blockly.Css.setCursor(Blockly.Css.Cursor.OPEN);
if (!Blockly.WidgetDiv.isVisible()) {
Blockly.Events.setGroup(false);
}
};
/**
* XXX: Hack to fix drop-down clicking issue for Google I/O.
* We cannot just check isShadow, since `this` is the parent block.
* See: https://github.com/google/blockly/issues/336
* @param {!Event} e Mouse up event.
* @return {boolean} True if the block is not the drop-down shadow.
*/
Blockly.BlockSvg.prototype.ioClickHackIsNotShadow_ = function(e) {
// True if click target is a non-shadow block path.
if (e.target === this.svgPath_ &&
e.target.parentNode === this.getSvgRoot()) {
return true;
}
for (var i = 0, input; input = this.inputList[i]; i++) {
for (var j = 0, field; field = input.fieldRow[j]; j++) {
if (field.imageElement_ && field.imageElement_ === e.target) {
return true;
}
}
}
return false;
};
/**
* Load the block's help page in a new window.
* @private
*/
Blockly.BlockSvg.prototype.showHelp_ = function() {
var url = goog.isFunction(this.helpUrl) ? this.helpUrl() : this.helpUrl;
if (url) {
// @todo rewrite
alert(url);
}
};
/**
* Show the context menu for this block.
* @param {!Event} e Mouse event.
* @private
*/
Blockly.BlockSvg.prototype.showContextMenu_ = function(e) {
if (this.workspace.options.readOnly || !this.contextMenu) {
return;
}
// Save the current block in a variable for use in closures.
var block = this;
var menuOptions = [];
if (this.isDeletable() && this.isMovable() && !block.isInFlyout) {
// Option to duplicate this block.
var duplicateOption = {
text: Blockly.Msg.DUPLICATE_BLOCK,
enabled: true,
callback: function() {
Blockly.duplicate_(block);
}
};
if (this.getDescendants().length > this.workspace.remainingCapacity()) {
duplicateOption.enabled = false;
}
menuOptions.push(duplicateOption);
if (this.isEditable() && this.workspace.options.comments) {
// Option to add/remove a comment.
var commentOption = {enabled: !goog.userAgent.IE};
if (this.comment) {
commentOption.text = Blockly.Msg.REMOVE_COMMENT;
commentOption.callback = function() {
block.setCommentText(null);
};
} else {
commentOption.text = Blockly.Msg.ADD_COMMENT;
commentOption.callback = function() {
block.setCommentText('');
};
}
menuOptions.push(commentOption);
}
// Option to delete this block.
// Count the number of blocks that are nested in this block.
var descendantCount = this.getDescendants(true).length;
var nextBlock = this.getNextBlock();
if (nextBlock) {
// Blocks in the current stack would survive this block's deletion.
descendantCount -= nextBlock.getDescendants(true).length;
}
var deleteOption = {
text: descendantCount == 1 ? Blockly.Msg.DELETE_BLOCK :
Blockly.Msg.DELETE_X_BLOCKS.replace('%1', String(descendantCount)),
enabled: true,
callback: function() {
Blockly.Events.setGroup(true);
block.dispose(true, true);
Blockly.Events.setGroup(false);
}
};
menuOptions.push(deleteOption);
}
// Option to get help.
var url = goog.isFunction(this.helpUrl) ? this.helpUrl() : this.helpUrl;
var helpOption = {enabled: !!url};
helpOption.text = Blockly.Msg.HELP;
helpOption.callback = function() {
block.showHelp_();
};
menuOptions.push(helpOption);
// Allow the block to add or modify menuOptions.
if (this.customContextMenu && !block.isInFlyout) {
this.customContextMenu(menuOptions);
}
Blockly.ContextMenu.show(e, menuOptions, this.RTL);
Blockly.ContextMenu.currentBlock = this;
};
/**
* Move the connections for this block and all blocks attached under it.
* Also update any attached bubbles.
* @param {number} dx Horizontal offset from current location.
* @param {number} dy Vertical offset from current location.
* @private
*/
Blockly.BlockSvg.prototype.moveConnections_ = function(dx, dy) {
if (!this.rendered) {
// Rendering is required to lay out the blocks.
// This is probably an invisible block attached to a collapsed block.
return;
}
var myConnections = this.getConnections_(false);
for (var i = 0; i < myConnections.length; i++) {
myConnections[i].moveBy(dx, dy);
}
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].computeIconLocation();
}
// Recurse through all blocks attached under this one.
for (i = 0; i < this.childBlocks_.length; i++) {
this.childBlocks_[i].moveConnections_(dx, dy);
}
};
/**
* Recursively adds or removes the dragging class to this node and its children.
* @param {boolean} adding True if adding, false if removing.
* @private
*/
Blockly.BlockSvg.prototype.setDragging_ = function(adding) {
if (adding) {
this.addDragging();
Blockly.draggingConnections_ =
Blockly.draggingConnections_.concat(this.getConnections_(true));
} else {
this.removeDragging();
Blockly.draggingConnections_ = [];
}
// Recurse through all blocks attached under this one.
for (var i = 0; i < this.childBlocks_.length; i++) {
this.childBlocks_[i].setDragging_(adding);
}
};
/**
* Move this block to its workspace's drag surface, accounting for positioning.
* Generally should be called at the same time as setDragging_(true).
* @private
*/
Blockly.BlockSvg.prototype.moveToDragSurface_ = function() {
// The translation for drag surface blocks,
// is equal to the current relative-to-surface position,
// to keep the position in sync as it move on/off the surface.
var xy = this.getRelativeToSurfaceXY();
this.clearTransformAttributes_();
this.workspace.dragSurface.translateSurface(xy.x, xy.y);
// Execute the move on the top-level SVG component
this.workspace.dragSurface.setBlocksAndShow(this.getSvgRoot());
};
/**
* Move this block back to the workspace block canvas.
* Generally should be called at the same time as setDragging_(false).
* @private
*/
Blockly.BlockSvg.prototype.moveOffDragSurface_ = function() {
// Translate to current position, turning off 3d.
var xy = this.getRelativeToSurfaceXY();
this.clearTransformAttributes_();
this.translate(xy.x, xy.y, false);
this.workspace.dragSurface.clearAndHide(this.workspace.getCanvas());
};
/**
* Clear the block of style="..." and transform="..." attributes.
* Used when the block is switching from 3d to 2d transform or vice versa.
* @private
*/
Blockly.BlockSvg.prototype.clearTransformAttributes_ = function() {
if (this.getSvgRoot().hasAttribute('transform')) {
this.getSvgRoot().removeAttribute('transform');
}
if (this.getSvgRoot().hasAttribute('style')) {
this.getSvgRoot().removeAttribute('style');
}
};
/**
* Drag this block to follow the mouse.
* @param {!Event} e Mouse move event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseMove_ = function(e) {
if (e.type == 'mousemove' && e.clientX <= 1 && e.clientY == 0 &&
e.button == 0) {
/* HACK:
Safari Mobile 6.0 and Chrome for Android 18.0 fire rogue mousemove
events on certain touch actions. Ignore events with these signatures.
This may result in a one-pixel blind spot in other browsers,
but this shouldn't be noticeable. */
e.stopPropagation();
return;
}
var oldXY = this.getRelativeToSurfaceXY();
var newXY = this.workspace.moveDrag(e);
if (Blockly.dragMode_ == Blockly.DRAG_STICKY) {
// Still dragging within the sticky DRAG_RADIUS.
var dr = goog.math.Coordinate.distance(oldXY, newXY) * this.workspace.scale;
if (dr > Blockly.DRAG_RADIUS) {
// Switch to unrestricted dragging.
Blockly.dragMode_ = Blockly.DRAG_FREE;
Blockly.longStop_();
// Must move to drag surface before unplug(),
// or else connections will calculate the wrong relative to surface XY
// in tighten_(). Then blocks connected to this block move around on the
// drag surface. By moving to the drag surface before unplug, connection
// positions will be calculated correctly.
this.moveToDragSurface_();
// Clear WidgetDiv/DropDownDiv without animating, in case blocks are moved
// around
Blockly.WidgetDiv.hide(true);
Blockly.DropDownDiv.hideWithoutAnimation();
if (this.parentBlock_) {
// Push this block to the very top of the stack.
this.unplug();
}
this.setDragging_(true);
}
}
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
this.handleDragFree_(oldXY, newXY, e);
}
// This event has been handled. No need to bubble up to the document.
e.stopPropagation();
e.preventDefault();
};
/**
* Handle a mouse movement when a block is already freely dragging.
* @param {!goog.math.Coordinate} oldXY The position of the block on screen
* before the most recent mouse movement.
* @param {!goog.math.Coordinate} newXY The new location after applying the
* mouse movement.
* @param {!Event} e Mouse move event.
* @private
*/
Blockly.BlockSvg.prototype.handleDragFree_ = function(oldXY, newXY, e) {
var dxy = goog.math.Coordinate.difference(oldXY, this.dragStartXY_);
this.workspace.dragSurface.translateSurface(newXY.x, newXY.y);
// Drag all the nested bubbles.
for (var i = 0; i < this.draggedBubbles_.length; i++) {
var commentData = this.draggedBubbles_[i];
commentData.bubble.setIconLocation(
goog.math.Coordinate.sum(commentData, dxy));
}
// Check to see if any of this block's connections are within range of
// another block's connection.
var myConnections = this.getConnections_(false);
// Also check the last connection on this stack
var lastOnStack = this.lastConnectionInStack();
if (lastOnStack && lastOnStack != this.nextConnection) {
myConnections.push(lastOnStack);
}
var closestConnection = null;
var localConnection = null;
var radiusConnection = Blockly.SNAP_RADIUS;
for (i = 0; i < myConnections.length; i++) {
var myConnection = myConnections[i];
var neighbour = myConnection.closest(radiusConnection, dxy);
if (neighbour.connection) {
closestConnection = neighbour.connection;
localConnection = myConnection;
radiusConnection = neighbour.radius;
}
}
var updatePreviews = true;
if (Blockly.localConnection_ && Blockly.highlightedConnection_) {
var xDiff = Blockly.localConnection_.x_ + dxy.x -
Blockly.highlightedConnection_.x_;
var yDiff = Blockly.localConnection_.y_ + dxy.y -
Blockly.highlightedConnection_.y_;
var curDistance = Math.sqrt(xDiff * xDiff + yDiff * yDiff);
// Slightly prefer the existing preview over a new preview.
if (closestConnection && radiusConnection > curDistance -
Blockly.CURRENT_CONNECTION_PREFERENCE) {
updatePreviews = false;
}
}
if (updatePreviews) {
var candidateIsLast = (localConnection == lastOnStack);
this.updatePreviews(closestConnection, localConnection, radiusConnection,
e, newXY.x - this.dragStartXY_.x, newXY.y - this.dragStartXY_.y,
candidateIsLast);
}
};
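// Numeric sketch of the preview hysteresis above (values invented, including
// the preference): with an existing preview at curDistance = 30px and
// Blockly.CURRENT_CONNECTION_PREFERENCE = 20px, a new candidate only replaces
// the preview if it is found at 30 - 20 = 10px or closer; any candidate
// farther away leaves updatePreviews false and keeps the current marker.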
/**
* Preview the results of the drag if the mouse is released immediately.
* @param {Blockly.Connection} closestConnection The closest connection found
* during the search
* @param {Blockly.Connection} localConnection The connection on the moving
* block.
* @param {number} radiusConnection The distance between closestConnection and
* localConnection.
* @param {!Event} e Mouse move event.
* @param {number} dx The x distance the block has moved onscreen up to this
* point in the drag.
* @param {number} dy The y distance the block has moved onscreen up to this
* point in the drag.
* @param {boolean} candidateIsLast True if the dragging stack is more than one
* block long and localConnection is the last connection on the stack.
*/
Blockly.BlockSvg.prototype.updatePreviews = function(closestConnection,
localConnection, radiusConnection, e, dx, dy, candidateIsLast) {
// Don't fire events for insertion marker creation or movement.
Blockly.Events.disable();
// Remove an insertion marker if needed. For Scratch-Blockly we are using
// grayed-out blocks instead of highlighting the connection; for compatibility
// with Web Blockly the name "highlightedConnection" will still be used.
if (Blockly.highlightedConnection_ &&
Blockly.highlightedConnection_ != closestConnection) {
if (Blockly.replacementMarker_) {
Blockly.BlockSvg.removeReplacementMarker();
} else if (Blockly.insertionMarker_ && Blockly.insertionMarkerConnection_) {
Blockly.BlockSvg.disconnectInsertionMarker();
}
// If there's already an insertion marker but it's representing the wrong
// block, delete it so we can create the correct one.
if (Blockly.insertionMarker_ &&
((candidateIsLast && Blockly.localConnection_.sourceBlock_ == this) ||
(!candidateIsLast && Blockly.localConnection_.sourceBlock_ != this))) {
Blockly.insertionMarker_.dispose();
Blockly.insertionMarker_ = null;
}
Blockly.highlightedConnection_ = null;
Blockly.localConnection_ = null;
}
// Add an insertion marker or replacement marker if needed.
if (closestConnection &&
closestConnection != Blockly.highlightedConnection_ &&
!closestConnection.sourceBlock_.isInsertionMarker()) {
Blockly.highlightedConnection_ = closestConnection;
Blockly.localConnection_ = localConnection;
    // Dragging a block over an existing block in an input should replace the
// existing block and bump it out. Similarly, dragging a terminal block
// over another (connected) terminal block will replace, not insert.
var shouldReplace = (localConnection.type == Blockly.OUTPUT_VALUE ||
(localConnection.type == Blockly.PREVIOUS_STATEMENT &&
closestConnection.isConnected() &&
!this.nextConnection));
if (shouldReplace) {
this.addReplacementMarker_(localConnection, closestConnection);
} else { // Should insert
this.connectInsertionMarker_(localConnection, closestConnection);
}
}
// Reenable events.
Blockly.Events.enable();
// Provide visual indication of whether the block will be deleted if
// dropped here.
if (this.isDeletable()) {
this.workspace.isDeleteArea(e);
}
};
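// Restating the shouldReplace branch above with examples (no new behavior):
// a dragged reporter (output connection) always takes the replace path, which
// highlights the occupying block or, for an empty input, the input shape; a
// cap block (previous connection, no next connection) dragged onto an
// occupied connection also replaces; every other combination gets a gray
// insertion marker via connectInsertionMarker_.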
/**
* Add highlighting showing which block will be replaced.
* @param {Blockly.Connection} localConnection The connection on the dragging
* block.
 * @param {Blockly.Connection} closestConnection The connection to pretend to
* connect to.
*/
Blockly.BlockSvg.prototype.addReplacementMarker_ = function(localConnection,
closestConnection) {
if (closestConnection.targetBlock()) {
Blockly.replacementMarker_ = closestConnection.targetBlock();
Blockly.replacementMarker_.highlightForReplacement(true);
} else if(localConnection.type == Blockly.OUTPUT_VALUE) {
Blockly.replacementMarker_ = closestConnection.sourceBlock_;
Blockly.replacementMarker_.highlightShapeForInput(closestConnection,
true);
}
};
/**
* Get rid of the highlighting marking the block that will be replaced.
*/
Blockly.BlockSvg.removeReplacementMarker = function() {
// If there's no block in place, but we're still connecting to a value input,
// then we must be highlighting an input shape.
if (Blockly.highlightedConnection_.type == Blockly.INPUT_VALUE &&
!Blockly.highlightedConnection_.isConnected()) {
Blockly.replacementMarker_.highlightShapeForInput(
Blockly.highlightedConnection_, false);
} else {
Blockly.replacementMarker_.highlightForReplacement(false);
}
Blockly.replacementMarker_ = null;
};
/**
* Place and render an insertion marker to indicate what would happen if you
* release the drag right now.
* @param {Blockly.Connection} localConnection The connection on the dragging
* block.
 * @param {Blockly.Connection} closestConnection The connection to connect the
* insertion marker to.
*/
Blockly.BlockSvg.prototype.connectInsertionMarker_ = function(localConnection,
closestConnection) {
if (!Blockly.insertionMarker_) {
Blockly.insertionMarker_ =
this.workspace.newBlock(Blockly.localConnection_.sourceBlock_.type);
Blockly.insertionMarker_.setInsertionMarker(true);
Blockly.insertionMarker_.initSvg();
}
var insertionMarker = Blockly.insertionMarker_;
var insertionMarkerConnection = insertionMarker.getMatchingConnection(
localConnection.sourceBlock_, localConnection);
if (insertionMarkerConnection != Blockly.insertionMarkerConnection_) {
insertionMarker.rendered = true;
// Render disconnected from everything else so that we have a valid
// connection location.
insertionMarker.render();
insertionMarker.getSvgRoot().setAttribute('visibility', 'visible');
this.positionNewBlock(insertionMarker,
insertionMarkerConnection, closestConnection);
if (insertionMarkerConnection.type == Blockly.PREVIOUS_STATEMENT &&
!insertionMarker.nextConnection) {
Blockly.bumpedConnection_ = closestConnection.targetConnection;
}
// Renders insertion marker.
insertionMarkerConnection.connect(closestConnection);
Blockly.insertionMarkerConnection_ = insertionMarkerConnection;
}
};
/**
* Disconnect the current insertion marker from the stack, and heal the stack to
* its previous state.
*/
Blockly.BlockSvg.disconnectInsertionMarker = function() {
// The insertion marker is the first block in a stack, either because it
// doesn't have a previous connection or because the previous connection is
// not connected. Unplug won't do anything in that case. Instead, unplug the
// following block.
if (Blockly.insertionMarkerConnection_ ==
Blockly.insertionMarker_.nextConnection &&
(!Blockly.insertionMarker_.previousConnection ||
!Blockly.insertionMarker_.previousConnection.targetConnection)) {
Blockly.insertionMarkerConnection_.targetBlock().unplug(false);
}
// Inside of a C-block, first statement connection.
else if (Blockly.insertionMarkerConnection_.type == Blockly.NEXT_STATEMENT &&
Blockly.insertionMarkerConnection_ !=
Blockly.insertionMarker_.nextConnection) {
var innerConnection = Blockly.insertionMarkerConnection_.targetConnection;
innerConnection.sourceBlock_.unplug(false);
var previousBlockNextConnection =
Blockly.insertionMarker_.previousConnection.targetConnection;
Blockly.insertionMarker_.unplug(true);
if (previousBlockNextConnection) {
previousBlockNextConnection.connect(innerConnection);
}
}
else {
Blockly.insertionMarker_.unplug(true /* healStack */);
}
if (Blockly.insertionMarkerConnection_.targetConnection) {
throw 'insertionMarkerConnection still connected at the end of disconnectInsertionMarker';
}
Blockly.insertionMarkerConnection_ = null;
Blockly.insertionMarker_.getSvgRoot().setAttribute('visibility', 'hidden');
};
/**
* Add or remove the UI indicating if this block is movable or not.
*/
Blockly.BlockSvg.prototype.updateMovable = function() {
if (this.isMovable()) {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDraggable');
} else {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDraggable');
}
};
/**
* Set whether this block is movable or not.
* @param {boolean} movable True if movable.
*/
Blockly.BlockSvg.prototype.setMovable = function(movable) {
Blockly.BlockSvg.superClass_.setMovable.call(this, movable);
this.updateMovable();
};
/**
* Set whether this block is editable or not.
* @param {boolean} editable True if editable.
*/
Blockly.BlockSvg.prototype.setEditable = function(editable) {
Blockly.BlockSvg.superClass_.setEditable.call(this, editable);
if (this.rendered) {
var icons = this.getIcons();
for (var i = 0; i < icons.length; i++) {
icons[i].updateEditable();
}
}
};
/**
* Set whether this block is a shadow block or not.
* @param {boolean} shadow True if a shadow.
*/
Blockly.BlockSvg.prototype.setShadow = function(shadow) {
Blockly.BlockSvg.superClass_.setShadow.call(this, shadow);
this.updateColour();
};
/**
* Set whether this block is an insertion marker block or not.
* @param {boolean} insertionMarker True if an insertion marker.
*/
Blockly.BlockSvg.prototype.setInsertionMarker = function(insertionMarker) {
Blockly.BlockSvg.superClass_.setInsertionMarker.call(this, insertionMarker);
this.updateColour();
};
/**
* Return the root node of the SVG or null if none exists.
* @return {Element} The root SVG node (probably a group).
*/
Blockly.BlockSvg.prototype.getSvgRoot = function() {
return this.svgGroup_;
};
/**
* Dispose of this block.
* @param {boolean} healStack If true, then try to heal any gap by connecting
* the next statement with the previous statement. Otherwise, dispose of
* all children of this block.
* @param {boolean} animate If true, show a disposal animation and sound.
*/
Blockly.BlockSvg.prototype.dispose = function(healStack, animate) {
Blockly.Tooltip.hide();
Blockly.Field.startCache();
// If this block is being dragged, unlink the mouse events.
if (Blockly.selected == this) {
this.unselect();
Blockly.terminateDrag_();
}
// If this block has a context menu open, close it.
if (Blockly.ContextMenu.currentBlock == this) {
Blockly.ContextMenu.hide();
}
if (animate && this.rendered) {
this.unplug(healStack);
this.disposeUiEffect();
}
// Stop rerendering.
this.rendered = false;
Blockly.Events.disable();
var icons = this.getIcons();
for (var i = 0; i < icons.length; i++) {
icons[i].dispose();
}
Blockly.Events.enable();
Blockly.BlockSvg.superClass_.dispose.call(this, healStack);
goog.dom.removeNode(this.svgGroup_);
// Sever JavaScript to DOM connections.
this.svgGroup_ = null;
this.svgPath_ = null;
Blockly.Field.stopCache();
};
/**
* Play some UI effects (sound, animation) when disposing of a block.
*/
Blockly.BlockSvg.prototype.disposeUiEffect = function() {
this.workspace.playAudio('delete');
var xy = Blockly.getSvgXY_(/** @type {!Element} */ (this.svgGroup_),
this.workspace);
// Deeply clone the current block.
var clone = this.svgGroup_.cloneNode(true);
clone.translateX_ = xy.x;
clone.translateY_ = xy.y;
clone.setAttribute('transform',
'translate(' + clone.translateX_ + ',' + clone.translateY_ + ')');
this.workspace.getParentSvg().appendChild(clone);
clone.bBox_ = clone.getBBox();
// Start the animation.
Blockly.BlockSvg.disposeUiStep_(clone, this.RTL, new Date(),
this.workspace.scale);
};
/**
* Play some UI effects (sound) after a connection has been established.
*/
Blockly.BlockSvg.prototype.connectionUiEffect = function() {
this.workspace.playAudio('click');
};
/**
* Animate a cloned block and eventually dispose of it.
 * This is a class method, not an instance method since the original block has
* been destroyed and is no longer accessible.
* @param {!Element} clone SVG element to animate and dispose of.
* @param {boolean} rtl True if RTL, false if LTR.
* @param {!Date} start Date of animation's start.
* @param {number} workspaceScale Scale of workspace.
* @private
*/
Blockly.BlockSvg.disposeUiStep_ = function(clone, rtl, start, workspaceScale) {
var ms = (new Date()) - start;
var percent = ms / 150;
if (percent > 1) {
goog.dom.removeNode(clone);
} else {
var x = clone.translateX_ +
(rtl ? -1 : 1) * clone.bBox_.width * workspaceScale / 2 * percent;
var y = clone.translateY_ + clone.bBox_.height * workspaceScale * percent;
var scale = (1 - percent) * workspaceScale;
clone.setAttribute('transform', 'translate(' + x + ',' + y + ')' +
' scale(' + scale + ')');
var closure = function() {
Blockly.BlockSvg.disposeUiStep_(clone, rtl, start, workspaceScale);
};
setTimeout(closure, 10);
}
};
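// One frame of the animation above, worked through (illustrative numbers):
// at ms = 75 of the 150ms animation, percent = 0.5, so an LTR clone of
// width 100 at workspaceScale 1 has drifted 100 * 1 / 2 * 0.5 = 25px right,
// fallen height * 0.5 pixels, and is drawn at scale 0.5 before the next
// 10ms timeout schedules the following frame.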
/**
* Enable or disable a block.
*/
Blockly.BlockSvg.prototype.updateDisabled = function() {
// not supported
};
/**
* Returns the comment on this block (or '' if none).
* @return {string} Block's comment.
*/
Blockly.BlockSvg.prototype.getCommentText = function() {
if (this.comment) {
var comment = this.comment.getText();
// Trim off trailing whitespace.
return comment.replace(/\s+$/, '').replace(/ +\n/g, '\n');
}
return '';
};
/**
* Set this block's comment text.
* @param {?string} text The text, or null to delete.
*/
Blockly.BlockSvg.prototype.setCommentText = function(text) {
var changedState = false;
if (goog.isString(text)) {
if (!this.comment) {
this.comment = new Blockly.Comment(this);
changedState = true;
}
this.comment.setText(/** @type {string} */ (text));
} else {
if (this.comment) {
this.comment.dispose();
changedState = true;
}
}
if (changedState && this.rendered) {
this.render();
// Adding or removing a comment icon will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Set this block's warning text.
* @param {?string} text The text, or null to delete.
* @param {string=} opt_id An optional ID for the warning text to be able to
* maintain multiple warnings.
*/
Blockly.BlockSvg.prototype.setWarningText = function(text, opt_id) {
if (!this.setWarningText.pid_) {
// Create a database of warning PIDs.
// Only runs once per block (and only those with warnings).
this.setWarningText.pid_ = Object.create(null);
}
var id = opt_id || '';
if (!id) {
    // Kill all previous pending processes, this edit supersedes them all.
for (var n in this.setWarningText.pid_) {
clearTimeout(this.setWarningText.pid_[n]);
delete this.setWarningText.pid_[n];
}
} else if (this.setWarningText.pid_[id]) {
// Only queue up the latest change. Kill any earlier pending process.
clearTimeout(this.setWarningText.pid_[id]);
delete this.setWarningText.pid_[id];
}
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
// Don't change the warning text during a drag.
// Wait until the drag finishes.
var thisBlock = this;
this.setWarningText.pid_[id] = setTimeout(function() {
if (thisBlock.workspace) { // Check block wasn't deleted.
delete thisBlock.setWarningText.pid_[id];
thisBlock.setWarningText(text, id);
}
}, 100);
return;
}
if (this.isInFlyout) {
text = null;
}
var changedState = false;
if (goog.isString(text)) {
if (!this.warning) {
this.warning = new Blockly.Warning(this);
changedState = true;
}
this.warning.setText(/** @type {string} */ (text), id);
} else {
// Dispose all warnings if no id is given.
if (this.warning && !id) {
this.warning.dispose();
changedState = true;
} else if (this.warning) {
var oldText = this.warning.getText();
this.warning.setText('', id);
var newText = this.warning.getText();
if (!newText) {
this.warning.dispose();
}
      changedState = oldText != newText;
}
}
if (changedState && this.rendered) {
this.render();
// Adding or removing a warning icon will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Give this block a mutator dialog.
* @param {Blockly.Mutator} mutator A mutator dialog instance or null to remove.
*/
Blockly.BlockSvg.prototype.setMutator = function(mutator) {
if (this.mutator && this.mutator !== mutator) {
this.mutator.dispose();
}
if (mutator) {
mutator.block_ = this;
this.mutator = mutator;
mutator.createIcon();
}
};
/**
* Select this block. Highlight it visually.
*/
Blockly.BlockSvg.prototype.addSelect = function() {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklySelected');
// Move the selected block to the top of the stack.
this.svgGroup_.parentNode.appendChild(this.svgGroup_);
};
/**
* Unselect this block. Remove its highlighting.
*/
Blockly.BlockSvg.prototype.removeSelect = function() {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklySelected');
};
/**
* Adds the dragging class to this block.
*/
Blockly.BlockSvg.prototype.addDragging = function() {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDragging');
};
/**
* Removes the dragging class from this block.
*/
Blockly.BlockSvg.prototype.removeDragging = function() {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDragging');
};
// Overrides of functions on Blockly.Block that take into account whether the
// block has been rendered.
/**
* Change the colour of a block.
* @param {number|string} colour HSV hue value, or #RRGGBB string.
* @param {number|string} colourSecondary Secondary HSV hue value, or #RRGGBB
* string.
* @param {number|string} colourTertiary Tertiary HSV hue value, or #RRGGBB
* string.
*/
Blockly.BlockSvg.prototype.setColour = function(colour, colourSecondary,
colourTertiary) {
Blockly.BlockSvg.superClass_.setColour.call(this, colour, colourSecondary,
colourTertiary);
if (this.rendered) {
this.updateColour();
}
};
/**
* Set whether this block can chain onto the bottom of another block.
* @param {boolean} newBoolean True if there can be a previous statement.
* @param {string|Array.<string>|null|undefined} opt_check Statement type or
* list of statement types. Null/undefined if any type could be connected.
*/
Blockly.BlockSvg.prototype.setPreviousStatement =
function(newBoolean, opt_check) {
/* eslint-disable indent */
Blockly.BlockSvg.superClass_.setPreviousStatement.call(this, newBoolean,
opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
}; /* eslint-enable indent */
/**
* Set whether another block can chain onto the bottom of this block.
* @param {boolean} newBoolean True if there can be a next statement.
* @param {string|Array.<string>|null|undefined} opt_check Statement type or
* list of statement types. Null/undefined if any type could be connected.
*/
Blockly.BlockSvg.prototype.setNextStatement = function(newBoolean, opt_check) {
Blockly.BlockSvg.superClass_.setNextStatement.call(this, newBoolean,
opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Set whether this block returns a value.
* @param {boolean} newBoolean True if there is an output.
* @param {string|Array.<string>|null|undefined} opt_check Returned type or list
* of returned types. Null or undefined if any type could be returned
* (e.g. variable get).
*/
Blockly.BlockSvg.prototype.setOutput = function(newBoolean, opt_check) {
Blockly.BlockSvg.superClass_.setOutput.call(this, newBoolean, opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Set whether value inputs are arranged horizontally or vertically.
* @param {boolean} newBoolean True if inputs are horizontal.
*/
Blockly.BlockSvg.prototype.setInputsInline = function(newBoolean) {
Blockly.BlockSvg.superClass_.setInputsInline.call(this, newBoolean);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Remove an input from this block.
* @param {string} name The name of the input.
* @param {boolean=} opt_quiet True to prevent error if input is not present.
* @throws {goog.asserts.AssertionError} if the input is not present and
* opt_quiet is not true.
*/
Blockly.BlockSvg.prototype.removeInput = function(name, opt_quiet) {
Blockly.BlockSvg.superClass_.removeInput.call(this, name, opt_quiet);
if (this.rendered) {
this.render();
// Removing an input will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Move a numbered input to a different location on this block.
* @param {number} inputIndex Index of the input to move.
* @param {number} refIndex Index of input that should be after the moved input.
*/
Blockly.BlockSvg.prototype.moveNumberedInputBefore = function(
inputIndex, refIndex) {
Blockly.BlockSvg.superClass_.moveNumberedInputBefore.call(this, inputIndex,
refIndex);
if (this.rendered) {
this.render();
// Moving an input will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Add a value input, statement input or local variable to this block.
* @param {number} type Either Blockly.INPUT_VALUE or Blockly.NEXT_STATEMENT or
* Blockly.DUMMY_INPUT.
 * @param {string} name Language-neutral identifier which may be used to find this
* input again. Should be unique to this block.
* @return {!Blockly.Input} The input object created.
* @private
*/
Blockly.BlockSvg.prototype.appendInput_ = function(type, name) {
var input = Blockly.BlockSvg.superClass_.appendInput_.call(this, type, name);
if (this.rendered) {
this.render();
// Adding an input will cause the block to change shape.
this.bumpNeighbours_();
}
return input;
};
/**
* Returns connections originating from this block.
* @param {boolean} all If true, return all connections even hidden ones.
* Otherwise, for a non-rendered block return an empty list, and for a
* collapsed block don't return inputs connections.
* @return {!Array.<!Blockly.Connection>} Array of connections.
* @private
*/
Blockly.BlockSvg.prototype.getConnections_ = function(all) {
var myConnections = [];
if (all || this.rendered) {
if (this.outputConnection) {
myConnections.push(this.outputConnection);
}
if (this.previousConnection) {
myConnections.push(this.previousConnection);
}
if (this.nextConnection) {
myConnections.push(this.nextConnection);
}
if (all || !this.collapsed_) {
for (var i = 0, input; input = this.inputList[i]; i++) {
if (input.connection) {
myConnections.push(input.connection);
}
}
}
}
return myConnections;
};
/**
* Create a connection of the specified type.
* @param {number} type The type of the connection to create.
* @return {!Blockly.RenderedConnection} A new connection of the specified type.
* @private
*/
Blockly.BlockSvg.prototype.makeConnection_ = function(type) {
return new Blockly.RenderedConnection(this, type);
};
| 1 | 7,870 | This should be before the if on line 1028, which should turn into an else if | LLK-scratch-blocks | js |
@@ -27,6 +27,13 @@ type ClusterNetworkPolicySpecBuilder struct {
Name string
}
+type ACNPRuleAppliedToSpec struct {
+ PodSelector map[string]string
+ NSSelector map[string]string
+ PodSelectorMatchExp *[]metav1.LabelSelectorRequirement
+ NSSelectorMatchExp *[]metav1.LabelSelectorRequirement
+}
+
func (b *ClusterNetworkPolicySpecBuilder) Get() *secv1alpha1.ClusterNetworkPolicy {
if b.Spec.Ingress == nil {
b.Spec.Ingress = []secv1alpha1.Rule{} | 1 | // Copyright 2020 Antrea Authors
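A minimal sketch (not part of the patch) of how the new struct could be turned into a per-rule NetworkPolicyPeer, mirroring the selector-merging rules SetAppliedToGroup already uses below; the helper name toPeer is invented for illustration:

func (s ACNPRuleAppliedToSpec) toPeer() secv1alpha1.NetworkPolicyPeer {
	var ps, ns *metav1.LabelSelector
	if s.PodSelector != nil || s.PodSelectorMatchExp != nil {
		ps = &metav1.LabelSelector{MatchLabels: s.PodSelector}
		if s.PodSelectorMatchExp != nil {
			ps.MatchExpressions = *s.PodSelectorMatchExp
		}
	}
	if s.NSSelector != nil || s.NSSelectorMatchExp != nil {
		ns = &metav1.LabelSelector{MatchLabels: s.NSSelector}
		if s.NSSelectorMatchExp != nil {
			ns.MatchExpressions = *s.NSSelectorMatchExp
		}
	}
	return secv1alpha1.NetworkPolicyPeer{PodSelector: ps, NamespaceSelector: ns}
}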
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
)
type ClusterNetworkPolicySpecBuilder struct {
Spec secv1alpha1.ClusterNetworkPolicySpec
Name string
}
func (b *ClusterNetworkPolicySpecBuilder) Get() *secv1alpha1.ClusterNetworkPolicy {
if b.Spec.Ingress == nil {
b.Spec.Ingress = []secv1alpha1.Rule{}
}
if b.Spec.Egress == nil {
b.Spec.Egress = []secv1alpha1.Rule{}
}
return &secv1alpha1.ClusterNetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: b.Name,
},
Spec: b.Spec,
}
}
func (b *ClusterNetworkPolicySpecBuilder) SetName(name string) *ClusterNetworkPolicySpecBuilder {
b.Name = name
return b
}
func (b *ClusterNetworkPolicySpecBuilder) SetPriority(p float64) *ClusterNetworkPolicySpecBuilder {
b.Spec.Priority = p
return b
}
func (b *ClusterNetworkPolicySpecBuilder) SetTier(tier string) *ClusterNetworkPolicySpecBuilder {
b.Spec.Tier = tier
return b
}
func (b *ClusterNetworkPolicySpecBuilder) SetAppliedToGroup(podSelector map[string]string,
nsSelector map[string]string,
podSelectorMatchExp *[]metav1.LabelSelectorRequirement,
nsSelectorMatchExp *[]metav1.LabelSelectorRequirement) *ClusterNetworkPolicySpecBuilder {
var ps *metav1.LabelSelector
var ns *metav1.LabelSelector
if podSelector != nil {
ps = &metav1.LabelSelector{
MatchLabels: podSelector,
}
if podSelectorMatchExp != nil {
ps.MatchExpressions = *podSelectorMatchExp
}
	} else if podSelectorMatchExp != nil {
		// Match-expression-only selector. An else-if is used so a selector that
		// already combines MatchLabels and MatchExpressions above is not
		// overwritten with an expressions-only one.
		ps = &metav1.LabelSelector{
			MatchExpressions: *podSelectorMatchExp,
		}
	}
if nsSelector != nil {
ns = &metav1.LabelSelector{
MatchLabels: nsSelector,
}
if nsSelectorMatchExp != nil {
ns.MatchExpressions = *nsSelectorMatchExp
}
	} else if nsSelectorMatchExp != nil {
		ns = &metav1.LabelSelector{
			MatchExpressions: *nsSelectorMatchExp,
		}
	}
appliedToPeer := secv1alpha1.NetworkPolicyPeer{
PodSelector: ps,
NamespaceSelector: ns,
}
b.Spec.AppliedTo = append(b.Spec.AppliedTo, appliedToPeer)
return b
}
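// Illustrative call (selector values invented): supplying both a label map and
// match expressions yields a single combined LabelSelector on the peer:
//
//	exp := []metav1.LabelSelectorRequirement{
//		{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"web"}},
//	}
//	b.SetAppliedToGroup(map[string]string{"app": "x"}, nil, &exp, nil)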
func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc v1.Protocol,
port *int, portName *string, cidr *string,
podSelector map[string]string, nsSelector map[string]string,
podSelectorMatchExp *[]metav1.LabelSelectorRequirement, nsSelectorMatchExp *[]metav1.LabelSelectorRequirement,
action secv1alpha1.RuleAction, name string) *ClusterNetworkPolicySpecBuilder {
var ps *metav1.LabelSelector
var ns *metav1.LabelSelector
if b.Spec.Ingress == nil {
b.Spec.Ingress = []secv1alpha1.Rule{}
}
if podSelector != nil {
ps = &metav1.LabelSelector{
MatchLabels: podSelector,
}
if podSelectorMatchExp != nil {
ps.MatchExpressions = *podSelectorMatchExp
}
	} else if podSelectorMatchExp != nil {
		// else-if, as above, so MatchLabels are not dropped when both are set.
		ps = &metav1.LabelSelector{
			MatchExpressions: *podSelectorMatchExp,
		}
	}
if nsSelector != nil {
ns = &metav1.LabelSelector{
MatchLabels: nsSelector,
}
if nsSelectorMatchExp != nil {
ns.MatchExpressions = *nsSelectorMatchExp
}
	} else if nsSelectorMatchExp != nil {
		ns = &metav1.LabelSelector{
			MatchExpressions: *nsSelectorMatchExp,
		}
	}
var ipBlock *secv1alpha1.IPBlock
if cidr != nil {
ipBlock = &secv1alpha1.IPBlock{
CIDR: *cidr,
}
}
var policyPeer []secv1alpha1.NetworkPolicyPeer
if ps != nil || ns != nil || ipBlock != nil {
policyPeer = []secv1alpha1.NetworkPolicyPeer{{
PodSelector: ps,
NamespaceSelector: ns,
IPBlock: ipBlock,
}}
}
var ports []secv1alpha1.NetworkPolicyPort
if port != nil && portName != nil {
panic("specify portname or port, not both")
}
if port != nil {
ports = []secv1alpha1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{IntVal: int32(*port)},
Protocol: &protoc,
},
}
}
if portName != nil {
ports = []secv1alpha1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{Type: intstr.String, StrVal: *portName},
Protocol: &protoc,
},
}
}
newRule := secv1alpha1.Rule{
From: policyPeer,
Ports: ports,
Action: &action,
Name: name,
}
b.Spec.Ingress = append(b.Spec.Ingress, newRule)
return b
}
func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc v1.Protocol,
port *int, portName *string, cidr *string,
podSelector map[string]string, nsSelector map[string]string,
podSelectorMatchExp *[]metav1.LabelSelectorRequirement, nsSelectorMatchExp *[]metav1.LabelSelectorRequirement,
action secv1alpha1.RuleAction, name string) *ClusterNetworkPolicySpecBuilder {
	// For simplicity, we just reuse the Ingress code here. The underlying data model for ingress/egress is identical,
	// with the exception of calling the rule `To` vs. `From`.
c := &ClusterNetworkPolicySpecBuilder{}
c.AddIngress(protoc, port, portName, cidr, podSelector, nsSelector, podSelectorMatchExp, nsSelectorMatchExp, action, name)
theRule := c.Get().Spec.Ingress[0]
b.Spec.Egress = append(b.Spec.Egress, secv1alpha1.Rule{
To: theRule.From,
Ports: theRule.Ports,
Action: theRule.Action,
Name: theRule.Name,
})
return b
}
// WithEgressDNS mutates every egress rule to also allow DNS over UDP port 53; convenience method.
func (b *ClusterNetworkPolicySpecBuilder) WithEgressDNS() *ClusterNetworkPolicySpecBuilder {
protocolUDP := v1.ProtocolUDP
route53 := secv1alpha1.NetworkPolicyPort{
Protocol: &protocolUDP,
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
}
for i, e := range b.Spec.Egress {
e.Ports = append(e.Ports, route53)
b.Spec.Egress[i] = e
}
return b
}
func (b *ClusterNetworkPolicySpecBuilder) AddEgressLogging() *ClusterNetworkPolicySpecBuilder {
for i, e := range b.Spec.Egress {
e.EnableLogging = true
b.Spec.Egress[i] = e
}
return b
}
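// End-to-end sketch of the builder (illustrative only; selector values are
// invented and RuleActionAllow is assumed from the secv1alpha1 API rather
// than shown in this file):
//
//	udp := v1.ProtocolUDP
//	dns := 53
//	cnp := (&ClusterNetworkPolicySpecBuilder{}).
//		SetName("acnp-allow-dns").
//		SetPriority(5).
//		SetAppliedToGroup(map[string]string{"app": "client"}, nil, nil, nil).
//		AddEgress(udp, &dns, nil, nil, nil,
//			map[string]string{"kubernetes.io/metadata.name": "kube-system"},
//			nil, nil, secv1alpha1.RuleActionAllow, "allow-dns").
//		Get()
//	_ = cnp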
| 1 | 26,380 | Not really introduced by this PR, but I don't recall why these have to be pointers, while `PodSelector` / `NSSelector` do not. Do you know the reason? | antrea-io-antrea | go |
@@ -98,10 +98,10 @@ func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) {
r.updateRecentMinRTT(sendDelta, now)
// Correct for ackDelay if information received from the peer results in a
- // positive RTT sample. Otherwise, we use the sendDelta as a reasonable
- // measure for smoothedRTT.
+ // sample at least as large as minRTT. Otherwise, only use the
+ // sendDelta.
sample := sendDelta
- if sample > ackDelay {
+ if sample-r.minRTT >= ackDelay {
sample -= ackDelay
}
r.latestRTT = sample | 1 | package congestion
import (
"time"
"github.com/lucas-clemente/quic-go/internal/utils"
)
const (
// Note: This constant is also defined in the ackhandler package.
initialRTTus = 100 * 1000
rttAlpha float32 = 0.125
oneMinusAlpha float32 = (1 - rttAlpha)
rttBeta float32 = 0.25
oneMinusBeta float32 = (1 - rttBeta)
halfWindow float32 = 0.5
quarterWindow float32 = 0.25
)
type rttSample struct {
rtt time.Duration
time time.Time
}
// RTTStats provides round-trip statistics
type RTTStats struct {
initialRTTus int64
recentMinRTTwindow time.Duration
minRTT time.Duration
latestRTT time.Duration
smoothedRTT time.Duration
meanDeviation time.Duration
numMinRTTsamplesRemaining uint32
newMinRTT rttSample
recentMinRTT rttSample
halfWindowRTT rttSample
quarterWindowRTT rttSample
}
// NewRTTStats makes a properly initialized RTTStats object
func NewRTTStats() *RTTStats {
return &RTTStats{
initialRTTus: initialRTTus,
recentMinRTTwindow: utils.InfDuration,
}
}
// InitialRTTus is the initial RTT in us
func (r *RTTStats) InitialRTTus() int64 { return r.initialRTTus }
// MinRTT returns the minRTT for the entire connection.
// May return Zero if no valid updates have occurred.
func (r *RTTStats) MinRTT() time.Duration { return r.minRTT }
// LatestRTT returns the most recent rtt measurement.
// May return Zero if no valid updates have occurred.
func (r *RTTStats) LatestRTT() time.Duration { return r.latestRTT }
// RecentMinRTT returns the minRTT since SampleNewRecentMinRTT has been
// called, or the minRTT for the entire connection if it was never called.
func (r *RTTStats) RecentMinRTT() time.Duration { return r.recentMinRTT.rtt }
// SmoothedRTT returns the EWMA smoothed RTT for the connection.
// May return Zero if no valid updates have occurred.
func (r *RTTStats) SmoothedRTT() time.Duration { return r.smoothedRTT }
// GetQuarterWindowRTT gets the quarter window RTT
func (r *RTTStats) GetQuarterWindowRTT() time.Duration { return r.quarterWindowRTT.rtt }
// GetHalfWindowRTT gets the half window RTT
func (r *RTTStats) GetHalfWindowRTT() time.Duration { return r.halfWindowRTT.rtt }
// MeanDeviation gets the mean deviation
func (r *RTTStats) MeanDeviation() time.Duration { return r.meanDeviation }
// SetRecentMinRTTwindow sets how old a recent min rtt sample can be.
func (r *RTTStats) SetRecentMinRTTwindow(recentMinRTTwindow time.Duration) {
r.recentMinRTTwindow = recentMinRTTwindow
}
// UpdateRTT updates the RTT based on a new sample.
func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) {
if sendDelta == utils.InfDuration || sendDelta <= 0 {
		utils.Debugf("Ignoring measured sendDelta, because it is either infinite, zero, or negative: %d", sendDelta/time.Microsecond)
return
}
// Update r.minRTT first. r.minRTT does not use an rttSample corrected for
// ackDelay but the raw observed sendDelta, since poor clock granularity at
// the client may cause a high ackDelay to result in underestimation of the
// r.minRTT.
if r.minRTT == 0 || r.minRTT > sendDelta {
r.minRTT = sendDelta
}
r.updateRecentMinRTT(sendDelta, now)
// Correct for ackDelay if information received from the peer results in a
// positive RTT sample. Otherwise, we use the sendDelta as a reasonable
// measure for smoothedRTT.
sample := sendDelta
if sample > ackDelay {
sample -= ackDelay
}
r.latestRTT = sample
// First time call.
if r.smoothedRTT == 0 {
r.smoothedRTT = sample
r.meanDeviation = sample / 2
} else {
r.meanDeviation = time.Duration(oneMinusBeta*float32(r.meanDeviation/time.Microsecond)+rttBeta*float32(utils.AbsDuration(r.smoothedRTT-sample)/time.Microsecond)) * time.Microsecond
r.smoothedRTT = time.Duration((float32(r.smoothedRTT/time.Microsecond)*oneMinusAlpha)+(float32(sample/time.Microsecond)*rttAlpha)) * time.Microsecond
}
}
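
// Worked example for the EWMA update above (a sketch, not from the source):
// with smoothedRTT = 100ms, sample = 60ms and rttAlpha = 0.125, the new
// smoothedRTT is 0.875*100ms + 0.125*60ms = 95ms.
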
// updateRecentMinRTT performs the recent minRTT update.
func (r *RTTStats) updateRecentMinRTT(sample time.Duration, now time.Time) {
if r.numMinRTTsamplesRemaining > 0 {
r.numMinRTTsamplesRemaining--
if r.newMinRTT.rtt == 0 || sample <= r.newMinRTT.rtt {
r.newMinRTT = rttSample{rtt: sample, time: now}
}
if r.numMinRTTsamplesRemaining == 0 {
r.recentMinRTT = r.newMinRTT
r.halfWindowRTT = r.newMinRTT
r.quarterWindowRTT = r.newMinRTT
}
}
// Update the three recent rtt samples.
if r.recentMinRTT.rtt == 0 || sample <= r.recentMinRTT.rtt {
r.recentMinRTT = rttSample{rtt: sample, time: now}
r.halfWindowRTT = r.recentMinRTT
r.quarterWindowRTT = r.recentMinRTT
} else if sample <= r.halfWindowRTT.rtt {
r.halfWindowRTT = rttSample{rtt: sample, time: now}
r.quarterWindowRTT = r.halfWindowRTT
} else if sample <= r.quarterWindowRTT.rtt {
r.quarterWindowRTT = rttSample{rtt: sample, time: now}
}
// Expire old min rtt samples.
if r.recentMinRTT.time.Before(now.Add(-r.recentMinRTTwindow)) {
r.recentMinRTT = r.halfWindowRTT
r.halfWindowRTT = r.quarterWindowRTT
r.quarterWindowRTT = rttSample{rtt: sample, time: now}
} else if r.halfWindowRTT.time.Before(now.Add(-time.Duration(float32(r.recentMinRTTwindow/time.Microsecond)*halfWindow) * time.Microsecond)) {
r.halfWindowRTT = r.quarterWindowRTT
r.quarterWindowRTT = rttSample{rtt: sample, time: now}
} else if r.quarterWindowRTT.time.Before(now.Add(-time.Duration(float32(r.recentMinRTTwindow/time.Microsecond)*quarterWindow) * time.Microsecond)) {
r.quarterWindowRTT = rttSample{rtt: sample, time: now}
}
}
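
// Note on the three samples above (descriptive only): recentMinRTT tracks the
// lowest RTT seen over the full window, while halfWindowRTT and
// quarterWindowRTT track candidate minimums over roughly the most recent half
// and quarter of it, so an expired minimum can be replaced by a fresher
// candidate instead of being recomputed from scratch.
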
// SampleNewRecentMinRTT forces RttStats to sample a new recent min rtt within the next
// |numSamples| UpdateRTT calls.
func (r *RTTStats) SampleNewRecentMinRTT(numSamples uint32) {
r.numMinRTTsamplesRemaining = numSamples
r.newMinRTT = rttSample{}
}
// OnConnectionMigration is called when connection migrates and rtt measurement needs to be reset.
func (r *RTTStats) OnConnectionMigration() {
r.latestRTT = 0
r.minRTT = 0
r.smoothedRTT = 0
r.meanDeviation = 0
r.initialRTTus = initialRTTus
r.numMinRTTsamplesRemaining = 0
r.recentMinRTTwindow = utils.InfDuration
r.recentMinRTT = rttSample{}
r.halfWindowRTT = rttSample{}
r.quarterWindowRTT = rttSample{}
}
// ExpireSmoothedMetrics causes the smoothedRTT to be increased to the
// latestRTT if the latestRTT is larger. The mean deviation is increased to
// the most recent deviation if it's larger.
func (r *RTTStats) ExpireSmoothedMetrics() {
r.meanDeviation = utils.MaxDuration(r.meanDeviation, utils.AbsDuration(r.smoothedRTT-r.latestRTT))
r.smoothedRTT = utils.MaxDuration(r.smoothedRTT, r.latestRTT)
}
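
A sketch (not part of the diff above) of the alternative the new clamp hints at: always applying the ackDelay correction but flooring the result at the observed minRTT. It reuses utils.MaxDuration and the fields defined in the file above.

// correctedSample subtracts the peer-reported ackDelay from the raw
// measurement, but never lets the corrected sample drop below minRTT.
func (r *RTTStats) correctedSample(sendDelta, ackDelay time.Duration) time.Duration {
	if ackDelay >= sendDelta {
		// The reported delay exceeds the raw measurement; keep the raw value.
		return sendDelta
	}
	return utils.MaxDuration(sendDelta-ackDelay, r.minRTT)
}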
| 1 | 7,278 | Is there a reason why we are ignoring the ackDelay if it would result in a value smaller than the min? Why not `max(sample - ackDelay, minRTT)`? | lucas-clemente-quic-go | go |
@@ -10,8 +10,8 @@ namespace Microsoft.CodeAnalysis.Sarif
/// <summary>
/// Defines methods to support the comparison of objects of type SarifLog for equality.
/// </summary>
- [GeneratedCode("Microsoft.Json.Schema.ToDotNet", "0.31.0.0")]
- public sealed class SarifLogEqualityComparer : IEqualityComparer<SarifLog>
+ [GeneratedCode("Microsoft.Json.Schema.ToDotNet", "0.32.0.0")]
+ internal sealed class SarifLogEqualityComparer : IEqualityComparer<SarifLog>
{
internal static readonly SarifLogEqualityComparer Instance = new SarifLogEqualityComparer();
| 1 | // Copyright (c) Microsoft. All Rights Reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.CodeDom.Compiler;
using System.Collections.Generic;
namespace Microsoft.CodeAnalysis.Sarif
{
/// <summary>
/// Defines methods to support the comparison of objects of type SarifLog for equality.
/// </summary>
[GeneratedCode("Microsoft.Json.Schema.ToDotNet", "0.31.0.0")]
public sealed class SarifLogEqualityComparer : IEqualityComparer<SarifLog>
{
internal static readonly SarifLogEqualityComparer Instance = new SarifLogEqualityComparer();
public bool Equals(SarifLog left, SarifLog right)
{
if (ReferenceEquals(left, right))
{
return true;
}
if (ReferenceEquals(left, null) || ReferenceEquals(right, null))
{
return false;
}
if (left.SchemaUri != right.SchemaUri)
{
return false;
}
if (left.Version != right.Version)
{
return false;
}
if (!Object.ReferenceEquals(left.Runs, right.Runs))
{
if (left.Runs == null || right.Runs == null)
{
return false;
}
if (left.Runs.Count != right.Runs.Count)
{
return false;
}
for (int index_0 = 0; index_0 < left.Runs.Count; ++index_0)
{
if (!Run.ValueComparer.Equals(left.Runs[index_0], right.Runs[index_0]))
{
return false;
}
}
}
return true;
}
public int GetHashCode(SarifLog obj)
{
if (ReferenceEquals(obj, null))
{
return 0;
}
int result = 17;
unchecked
{
if (obj.SchemaUri != null)
{
result = (result * 31) + obj.SchemaUri.GetHashCode();
}
result = (result * 31) + obj.Version.GetHashCode();
if (obj.Runs != null)
{
foreach (var value_0 in obj.Runs)
{
result = result * 31;
if (value_0 != null)
{
result = (result * 31) + value_0.GetHashCode();
}
}
}
}
return result;
}
}
} | 1 | 10,842 | Here's an example of how the files in the `NotYetGenerated` directory drifted out of sync from the generated files. When we made the equality comparers internal, we neglected to fix this one. | microsoft-sarif-sdk | .cs |
@@ -19,6 +19,10 @@ class GuidancePolicy < ApplicationPolicy
user.can_modify_guidance? && guidance.in_group_belonging_to?(user.org_id)
end
+ def index?
+ admin_index?
+ end
+
def admin_index?
user.can_modify_guidance?
end | 1 | class GuidancePolicy < ApplicationPolicy
attr_reader :user, :guidance
def initialize(user, guidance)
raise Pundit::NotAuthorizedError, "must be logged in" unless user
@user = user
@guidance = guidance
end
def admin_show?
user.can_modify_guidance? && guidance.in_group_belonging_to?(user.org_id)
end
def admin_edit?
user.can_modify_guidance? && guidance.in_group_belonging_to?(user.org_id)
end
def admin_update?
user.can_modify_guidance? && guidance.in_group_belonging_to?(user.org_id)
end
def admin_index?
user.can_modify_guidance?
end
def admin_new?
user.can_modify_guidance?
end
def admin_create?
user.can_modify_guidance?
end
def admin_destroy?
user.can_modify_guidance? && guidance.in_group_belonging_to?(user.org_id)
end
def admin_publish?
user.can_modify_guidance?
end
def admin_unpublish?
user.can_modify_guidance?
end
def update_phases?
user.can_modify_guidance?
end
def update_versions?
user.can_modify_guidance?
end
def update_sections?
user.can_modify_guidance?
end
def update_questions?
user.can_modify_guidance?
end
class Scope < Scope
def resolve
      Guidance.includes(:guidance_group, :themes).by_org(user.org_id)
end
end
end | 1 | 17,327 | nice. we should do this elsewhere too. We have a lot of repeated stuff in the policies | DMPRoadmap-roadmap | rb |
@@ -612,3 +612,14 @@ TEST_F(BadPonyTest, FieldReferenceInDefaultArgument)
TEST_ERRORS_1(src, "can't reference 'this' in a default argument");
}
+
+
+TEST_F(BadPonyTest, DefaultArgScope)
+{
+ const char* src =
+ "actor A\n"
+ " fun foo(x: None = (let y = None; y)) =>\n"
+ " y";
+
+ TEST_ERRORS_1(src, "can't find declaration of 'y'");
+} | 1 | #include <gtest/gtest.h>
#include <platform.h>
#include "util.h"
/** Pony code that parses, but is erroneous. Typically type check errors and
* things used in invalid contexts.
*
* We build all the way up to and including code gen and check that we do not
* assert, segfault, etc but that the build fails and at least one error is
* reported.
*
* There is definite potential for overlap with other tests but this is also a
* suitable location for tests which don't obviously belong anywhere else.
*/
#define TEST_COMPILE(src) DO(test_compile(src, "all"))
#define TEST_ERRORS_1(src, err1) \
{ const char* errs[] = {err1, NULL}; \
DO(test_expected_errors(src, "ir", errs)); }
#define TEST_ERRORS_2(src, err1, err2) \
{ const char* errs[] = {err1, err2, NULL}; \
DO(test_expected_errors(src, "ir", errs)); }
#define TEST_ERRORS_3(src, err1, err2, err3) \
{ const char* errs[] = {err1, err2, err3, NULL}; \
DO(test_expected_errors(src, "ir", errs)); }
class BadPonyTest : public PassTest
{};
// Cases from reported issues
TEST_F(BadPonyTest, ClassInOtherClassProvidesList)
{
// From issue #218
const char* src =
"class Named\n"
"class Dog is Named\n"
"actor Main\n"
" new create(env: Env) =>\n"
" None";
TEST_ERRORS_1(src, "can only provide traits and interfaces");
}
TEST_F(BadPonyTest, TypeParamMissingForTypeInProvidesList)
{
// From issue #219
const char* src =
"trait Bar[A]\n"
" fun bar(a: A) =>\n"
" None\n"
"trait Foo is Bar // here also should be a type argument, like Bar[U8]\n"
" fun foo() =>\n"
" None\n"
"actor Main\n"
" new create(env: Env) =>\n"
" None";
TEST_ERRORS_1(src, "not enough type arguments");
}
TEST_F(BadPonyTest, TupleIndexIsZero)
{
// From issue #397
const char* src =
"primitive Foo\n"
" fun bar(): None =>\n"
" (None, None)._0";
TEST_ERRORS_1(src, "Did you mean _1?");
}
TEST_F(BadPonyTest, TupleIndexIsOutOfRange)
{
// From issue #397
const char* src =
"primitive Foo\n"
" fun bar(): None =>\n"
" (None, None)._3";
TEST_ERRORS_1(src, "Valid range is [1, 2]");
}
TEST_F(BadPonyTest, InvalidLambdaReturnType)
{
// From issue #828
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" {(): tag => this }\n";
TEST_ERRORS_1(src, "lambda return type: tag");
}
TEST_F(BadPonyTest, InvalidMethodReturnType)
{
// From issue #828
const char* src =
"primitive Foo\n"
" fun bar(): iso =>\n"
" U32(1)\n";
TEST_ERRORS_1(src, "function return type: iso");
}
TEST_F(BadPonyTest, ObjectLiteralUninitializedField)
{
// From issue #879
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" object\n"
" let x: I32\n"
" end";
TEST_ERRORS_1(src, "object literal fields must be initialized");
}
TEST_F(BadPonyTest, LambdaCaptureVariableBeforeDeclarationWithTypeInferenceExpressionFail)
{
// From issue #1018
const char* src =
"class Foo\n"
" fun f() =>\n"
" {()(x) => None }\n"
" let x = 0";
TEST_ERRORS_1(src, "declaration of 'x' appears after use");
}
// TODO: This test is not correct because it does not fail without the fix.
// I do not know how to generate a test that calls genheader().
// Comments are welcome.
TEST_F(BadPonyTest, ExportedActorWithVariadicReturnTypeContainingNone)
{
// From issue #891
const char* src =
"primitive T\n"
"\n"
"actor @A\n"
" fun f(a: T): (T | None) =>\n"
" a\n";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, TypeAliasRecursionThroughTypeParameterInTuple)
{
// From issue #901
const char* src =
"type Foo is (Map[Foo, Foo], None)\n"
"actor Main\n"
" new create(env: Env) =>\n"
" None";
TEST_ERRORS_1(src, "type aliases can't be recursive");
}
TEST_F(BadPonyTest, ParenthesisedReturn)
{
// From issue #1050
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" (return)";
TEST_ERRORS_1(src, "use return only to exit early from a method");
}
TEST_F(BadPonyTest, ParenthesisedReturn2)
{
// From issue #1050
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" foo()\n"
" fun foo(): U64 =>\n"
" (return 0)\n"
" 2";
TEST_ERRORS_1(src, "Unreachable code");
}
TEST_F(BadPonyTest, MatchUncalledMethod)
{
// From issue #903
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" match foo\n"
" | None => None\n"
" end\n"
" fun foo() =>\n"
" None";
TEST_ERRORS_2(src, "can't reference a method without calling it",
"this pattern can never match");
}
TEST_F(BadPonyTest, TupleFieldReassign)
{
// From issue #1101
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" var foo: (U64, String) = (42, \"foo\")\n"
" foo._2 = \"bar\"";
TEST_ERRORS_1(src, "can't assign to an element of a tuple");
}
TEST_F(BadPonyTest, WithBlockTypeInference)
{
// From issue #1135
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" with x = 1 do None end";
TEST_ERRORS_3(src, "could not infer literal type, no valid types found",
"cannot infer type of $1$0",
"cannot infer type of x");
}
TEST_F(BadPonyTest, EmbedNestedTuple)
{
// From issue #1136
const char* src =
"class Foo\n"
" fun get_foo(): Foo => Foo\n"
"actor Main\n"
" embed foo: Foo\n"
" let x: U64\n"
" new create(env: Env) =>\n"
" (foo, x) = (Foo.get_foo(), 42)";
TEST_ERRORS_1(src, "an embedded field must be assigned using a constructor");
}
TEST_F(BadPonyTest, CircularTypeInfer)
{
// From issue #1334
const char* src =
"actor Main\n"
"new create(env: Env) =>\n"
"let x = x.create()\n"
"let y = y.create()";
TEST_ERRORS_2(src,
"can't use an undefined variable in an expression",
"can't use an undefined variable in an expression");
}
TEST_F(BadPonyTest, CallConstructorOnTypeIntersection)
{
// From issue #1398
const char* src =
"interface Foo\n"
"type Isect is (None & Foo)\n"
"actor Main\n"
" new create(env: Env) =>\n"
" Isect.create()";
TEST_ERRORS_1(src, "can't call a constructor on a type intersection");
}
TEST_F(BadPonyTest, AssignToFieldOfIso)
{
// From issue #1469
const char* src =
"class Foo\n"
" var x: String ref = String\n"
" fun iso bar(): String iso^ =>\n"
" let s = recover String end\n"
" x = s\n"
" consume s\n"
" fun ref foo(): String iso^ =>\n"
" let s = recover String end\n"
" let y: Foo iso = Foo\n"
" y.x = s\n"
" consume s";
TEST_ERRORS_2(src,
"right side must be a subtype of left side",
"right side must be a subtype of left side"
);
}
TEST_F(BadPonyTest, IndexArrayWithBrackets)
{
// From issue #1493
const char* src =
"actor Main\n"
"new create(env: Env) =>\n"
"let xs = [as I64: 1; 2; 3]\n"
"xs[1]";
TEST_ERRORS_1(src, "Value formal parameters not yet supported");
}
TEST_F(BadPonyTest, ShadowingBuiltinTypeParameter)
{
const char* src =
"class A[I8]\n"
"let b: U8 = 0";
TEST_ERRORS_1(src, "type parameter shadows existing type");
}
TEST_F(BadPonyTest, ShadowingTypeParameterInSameFile)
{
const char* src =
"trait B\n"
"class A[B]";
TEST_ERRORS_1(src, "can't reuse name 'B'");
}
TEST_F(BadPonyTest, TupleToUnionGentrace)
{
// From issue #1561
const char* src =
"primitive X\n"
"primitive Y\n"
"class iso T\n"
"actor Main\n"
" new create(env: Env) =>\n"
" this((T, Y))\n"
" be apply(m: (X | (T, Y))) => None";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, RefCapViolationViaCapReadTypeParameter)
{
// From issue #1328
const char* src =
"class Foo\n"
"var i: USize = 0\n"
"fun ref boom() => i = 3\n"
"actor Main\n"
"new create(env: Env) =>\n"
"let a: Foo val = Foo\n"
"call_boom[Foo val](a)\n"
"fun call_boom[A: Foo #read](x: A) =>\n"
"x.boom()";
TEST_ERRORS_1(src, "receiver type is not a subtype of target type");
}
TEST_F(BadPonyTest, RefCapViolationViaCapAnyTypeParameter)
{
// From issue #1328
const char* src =
"class Foo\n"
"var i: USize = 0\n"
"fun ref boom() => i = 3\n"
"actor Main\n"
"new create(env: Env) =>\n"
"let a: Foo val = Foo\n"
"call_boom[Foo val](a)\n"
"fun call_boom[A: Foo #any](x: A) =>\n"
"x.boom()";
TEST_ERRORS_1(src, "receiver type is not a subtype of target type");
}
TEST_F(BadPonyTest, TypeParamArrowClass)
{
// From issue #1687
const char* src =
"class C1\n"
"trait Test[A]\n"
"fun foo(a: A): A->C1";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, ArrowTypeParamInTypeConstraint)
{
// From issue #1694
const char* src =
"trait T1[A: B->A, B]\n"
"trait T2[A: box->B, B]";
TEST_ERRORS_2(src,
"arrow types can't be used as type constraints",
"arrow types can't be used as type constraints");
}
TEST_F(BadPonyTest, ArrowTypeParamInMethodConstraint)
{
// From issue #1809
const char* src =
"class Foo\n"
" fun foo[X: box->Y, Y](x: X) => None";
TEST_ERRORS_1(src,
"arrow types can't be used as type constraints");
}
TEST_F(BadPonyTest, AnnotatedIfClause)
{
// From issue #1751
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" if \\likely\\ U32(1) == 1 then\n"
" None\n"
" end\n";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, CapSubtypeInConstrainSubtyping)
{
// From PR #1816
const char* src =
"trait T\n"
" fun alias[X: Any iso](x: X!): X^\n"
"class C is T\n"
" fun alias[X: Any tag](x: X!): X^ => x\n";
TEST_ERRORS_1(src,
"type does not implement its provides list");
}
TEST_F(BadPonyTest, ObjectInheritsLaterTraitMethodWithParameter)
{
// From issue #1715
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" object is T end\n"
"trait T\n"
" fun apply(n: I32): Bool =>\n"
" n == 0\n";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, AddressofMissingTypearg)
{
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" @foo[None](addressof fn)\n"
" fun fn[A]() => None";
TEST_ERRORS_1(src,
"not enough type arguments");
}
TEST_F(BadPonyTest, ThisDotFieldRef)
{
// From issue #1865
const char* src =
"actor Main\n"
" let f: U8\n"
" new create(env: Env) =>\n"
" this.f = 1\n";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, CapSetInConstraintTypeParam)
{
const char* src =
"class A[X]\n"
"class B[X: A[Any #read]]\n";
TEST_ERRORS_1(src,
"a capability set can only appear in a type constraint");
}
TEST_F(BadPonyTest, MatchCasePatternConstructorTooFewArguments)
{
const char* src =
"class C\n"
" new create(key: String) => None\n"
"primitive Foo\n"
" fun apply(c: (C | None)) =>\n"
" match c\n"
" | C => None\n"
" end";
TEST_ERRORS_1(src, "not enough arguments");
}
TEST_F(BadPonyTest, ThisDotWhereDefIsntInTheTrait)
{
// From issue #1878
const char* src =
"trait T\n"
" fun foo(): USize => this.u\n"
"class C is T\n"
" var u: USize = 0\n";
TEST_ERRORS_1(src,
"can't find declaration of 'u'");
}
TEST_F(BadPonyTest, DontCareTypeInTupleTypeOfIfBlockValueUnused)
{
// From issue #1896
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" if true then\n"
" (var a, let _) = test()\n"
" end\n"
" fun test(): (U32, U32) =>\n"
" (1, 2)\n";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, ExhaustiveMatchCasesJumpAway)
{
// From issue #1898
const char* src =
"primitive Foo\n"
" fun apply(b: Bool) =>\n"
" if true then\n"
" match b\n"
" | let b': Bool => return\n"
" end\n"
" end";
TEST_COMPILE(src);
}
TEST_F(BadPonyTest, CallArgTypeErrorInsideTuple)
{
// From issue #1895
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" (\"\", foo([\"\"]))\n"
" fun foo(x: Array[USize]) => None";
TEST_ERRORS_1(src, "argument not a subtype of parameter");
}
TEST_F(BadPonyTest, NonExistFieldReferenceInConstructor)
{
// From issue #1932
const char* src =
"actor Main\n"
" new create(env: Env) =>\n"
" this.x = None";
TEST_ERRORS_2(src,
"can't find declaration of 'x'",
"left side must be something that can be assigned to");
}
TEST_F(BadPonyTest, TypeArgErrorInsideReturn)
{
const char* src =
"primitive P[A]\n"
"primitive Foo\n"
" fun apply(): (P[None], U8) =>\n"
" if true then\n"
" return (P, 0)\n"
" end\n"
" (P[None], 1)";
TEST_ERRORS_1(src, "not enough type arguments");
}
TEST_F(BadPonyTest, FieldReferenceInDefaultArgument)
{
const char* src =
"actor Main\n"
" let _env: Env\n"
" new create(env: Env) =>\n"
" _env = env\n"
" foo()\n"
" fun foo(env: Env = _env) =>\n"
" None";
TEST_ERRORS_1(src, "can't reference 'this' in a default argument");
}
| 1 | 10,737 | Small formatting thing, but can you remove the space before the semicolon? | ponylang-ponyc | c |
@@ -38,7 +38,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Performance
var reading = Task.Run(async () =>
{
- int remaining = InnerLoopCount * _writeLenght;
+ long remaining = InnerLoopCount * _writeLenght;
while (remaining != 0)
{
var result = await _pipe.Reader.ReadAsync(); | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System.Threading.Tasks;
using BenchmarkDotNet.Attributes;
using Microsoft.AspNetCore.Server.Kestrel.Internal.System.IO.Pipelines;
namespace Microsoft.AspNetCore.Server.Kestrel.Performance
{
[Config(typeof(CoreConfig))]
public class PipeThroughputBenchmark
{
private const int _writeLenght = 57;
private const int InnerLoopCount = 512;
private IPipe _pipe;
private PipeFactory _pipelineFactory;
[Setup]
public void Setup()
{
_pipelineFactory = new PipeFactory();
_pipe = _pipelineFactory.Create();
}
[Benchmark(OperationsPerInvoke = InnerLoopCount)]
public void ParseLiveAspNetTwoTasks()
{
var writing = Task.Run(async () =>
{
for (int i = 0; i < InnerLoopCount; i++)
{
var writableBuffer = _pipe.Writer.Alloc(_writeLenght);
writableBuffer.Advance(_writeLenght);
await writableBuffer.FlushAsync();
}
});
var reading = Task.Run(async () =>
{
int remaining = InnerLoopCount * _writeLenght;
while (remaining != 0)
{
var result = await _pipe.Reader.ReadAsync();
remaining -= result.Buffer.Length;
_pipe.Reader.Advance(result.Buffer.End, result.Buffer.End);
}
});
Task.WaitAll(writing, reading);
}
[Benchmark(OperationsPerInvoke = InnerLoopCount)]
public void ParseLiveAspNetInline()
{
for (int i = 0; i < InnerLoopCount; i++)
{
var writableBuffer = _pipe.Writer.Alloc(_writeLenght);
writableBuffer.Advance(_writeLenght);
writableBuffer.FlushAsync().GetAwaiter().GetResult();
var result = _pipe.Reader.ReadAsync().GetAwaiter().GetResult();
_pipe.Reader.Advance(result.Buffer.End, result.Buffer.End);
}
}
}
}
| 1 | 13,699 | Not new, but nit: _writeLeng*th*. | aspnet-KestrelHttpServer | .cs |
@@ -133,6 +133,13 @@ func TestComposeCmd(t *testing.T) {
assert.Error(err)
}
+func TestCheckCompose(t *testing.T) {
+ assert := asrt.New(t)
+
+ err := CheckDockerCompose()
+ assert.NoError(err)
+}
+
func TestGetAppContainers(t *testing.T) {
assert := asrt.New(t)
sites, err := GetAppContainers("dockertest") | 1 | package dockerutil_test
import (
"os"
"testing"
log "github.com/sirupsen/logrus"
"path/filepath"
. "github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/output"
docker "github.com/fsouza/go-dockerclient"
asrt "github.com/stretchr/testify/assert"
)
var (
// The image here can be any image, it just has to exist so it can be used for labels, etc.
TestRouterImage = "busybox"
TestRouterTag = "1"
)
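// TestMain pulls the test image, creates the shared test container, runs the
// test suite, and removes the container afterwards.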
func TestMain(m *testing.M) {
output.LogSetUp()
// prep docker container for docker util tests
client := GetDockerClient()
err := client.PullImage(docker.PullImageOptions{
Repository: TestRouterImage,
Tag: TestRouterTag,
}, docker.AuthConfiguration{})
if err != nil {
log.Fatal("failed to pull test image ", err)
}
container, err := client.CreateContainer(docker.CreateContainerOptions{
Name: "envtest",
Config: &docker.Config{
Image: TestRouterImage + ":" + TestRouterTag,
Labels: map[string]string{
"com.docker.compose.service": "ddevrouter",
"com.ddev.site-name": "dockertest",
},
Env: []string{"HOTDOG=superior-to-corndog", "POTATO=future-fry"},
},
})
if err != nil {
log.Fatal("failed to create/start docker container ", err)
}
exitStatus := m.Run()
// teardown docker container from docker util tests
err = client.RemoveContainer(docker.RemoveContainerOptions{
ID: container.ID,
Force: true,
})
if err != nil {
log.Fatal("failed to remove test container: ", err)
}
os.Exit(exitStatus)
}
// TestGetContainerHealth tests the function for processing container readiness.
func TestGetContainerHealth(t *testing.T) {
assert := asrt.New(t)
container := docker.APIContainers{
Status: "Up 24 seconds (health: starting)",
}
out := GetContainerHealth(container)
assert.Equal(out, "starting")
container = docker.APIContainers{
Status: "Up 14 minutes (healthy)",
}
out = GetContainerHealth(container)
assert.Equal(out, "healthy")
container = docker.APIContainers{
State: "exited",
}
out = GetContainerHealth(container)
assert.Equal(out, container.State)
container = docker.APIContainers{
State: "restarting",
}
out = GetContainerHealth(container)
assert.Equal(out, container.State)
}
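
// A table-driven equivalent of the four checks above; a sketch, not part of
// the original test, assuming the same GetContainerHealth signature.
func testContainerHealthTable(t *testing.T) {
	assert := asrt.New(t)
	cases := []struct {
		container docker.APIContainers
		want      string
	}{
		{docker.APIContainers{Status: "Up 24 seconds (health: starting)"}, "starting"},
		{docker.APIContainers{Status: "Up 14 minutes (healthy)"}, "healthy"},
		{docker.APIContainers{State: "exited"}, "exited"},
		{docker.APIContainers{State: "restarting"}, "restarting"},
	}
	for _, c := range cases {
		assert.Equal(GetContainerHealth(c.container), c.want)
	}
}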
// TestContainerWait tests the error cases for the container check wait loop.
func TestContainerWait(t *testing.T) {
assert := asrt.New(t)
labels := map[string]string{
"com.ddev.site-name": "foo",
"com.docker.compose.service": "web",
}
err := ContainerWait(0, labels)
assert.Error(err)
assert.Equal("health check timed out", err.Error())
err = ContainerWait(5, labels)
assert.Error(err)
assert.Equal("failed to query container", err.Error())
}
// TestComposeCmd tests execution of docker-compose commands.
func TestComposeCmd(t *testing.T) {
assert := asrt.New(t)
composeFiles := []string{filepath.Join("testdata", "docker-compose.yml")}
stdout, stderr, err := ComposeCmd(composeFiles, "config", "--services")
assert.NoError(err)
assert.Contains(stdout, "web")
assert.Contains(stdout, "db")
assert.Contains(stderr, "Defaulting to a blank string")
composeFiles = append(composeFiles, filepath.Join("testdata", "docker-compose.override.yml"))
stdout, stderr, err = ComposeCmd(composeFiles, "config", "--services")
assert.NoError(err)
assert.Contains(stdout, "web")
assert.Contains(stdout, "db")
assert.Contains(stdout, "foo")
assert.Contains(stderr, "Defaulting to a blank string")
composeFiles = []string{"invalid.yml"}
_, _, err = ComposeCmd(composeFiles, "config", "--services")
assert.Error(err)
}
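// TestGetAppContainers verifies that the container created in TestMain is
// returned for the "dockertest" site.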
func TestGetAppContainers(t *testing.T) {
assert := asrt.New(t)
sites, err := GetAppContainers("dockertest")
assert.NoError(err)
assert.Equal(sites[0].Image, TestRouterImage+":"+TestRouterTag)
}
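// TestGetContainerEnv tests retrieval of environment variables from a running
// container.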
func TestGetContainerEnv(t *testing.T) {
assert := asrt.New(t)
container, err := FindContainerByLabels(map[string]string{"com.docker.compose.service": "ddevrouter"})
assert.NoError(err)
env := GetContainerEnv("HOTDOG", container)
assert.Equal("superior-to-corndog", env)
env = GetContainerEnv("POTATO", container)
assert.Equal("future-fry", env)
env = GetContainerEnv("NONEXISTENT", container)
assert.Equal("", env)
}
| 1 | 11,989 | Our habit is to go ahead and put a description line (or more) in front of every function, not just non-test or exported functions. | drud-ddev | php |
@@ -3,11 +3,12 @@ package volume
import (
"crypto/tls"
"encoding/json"
- "github.com/libopenstorage/openstorage/api"
- "github.com/stretchr/testify/require"
"net/http"
"net/http/httptest"
"testing"
+
+ "github.com/libopenstorage/openstorage/api"
+ "github.com/stretchr/testify/require"
)
func TestClientTLS(t *testing.T) { | 1 | package volume
import (
"crypto/tls"
"encoding/json"
"github.com/libopenstorage/openstorage/api"
"github.com/stretchr/testify/require"
"net/http"
"net/http/httptest"
"testing"
)
func TestClientTLS(t *testing.T) {
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var vol *api.Volume
json.NewEncoder(w).Encode(vol)
}))
defer ts.Close()
clnt, err := NewDriverClient(ts.URL, "pxd", "", "")
require.NoError(t, err)
clnt.SetTLS(&tls.Config{InsecureSkipVerify: true})
_, err = VolumeDriver(clnt).Inspect([]string{"12345"})
require.NoError(t, err)
}
func TestClientCredCreate(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var request *api.CredCreateRequest
var response *api.CredCreateResponse
if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
http.Error(w, "Failed decode input parameters", http.StatusBadRequest)
return
}
if len(request.InputParams) == 0 {
http.Error(w, "No input provided", http.StatusBadRequest)
return
}
if _, ok := request.InputParams[api.OptCredType]; !ok {
http.Error(w, "No input provided", http.StatusBadRequest)
return
}
if request.InputParams[api.OptCredType] != "s3" &&
request.InputParams[api.OptCredType] != "google" &&
request.InputParams[api.OptCredType] != "azure" {
			http.Error(w, "Unsupported cloud provider", http.StatusBadRequest)
return
}
if request.InputParams[api.OptCredType] == "s3" {
			region := request.InputParams[api.OptCredRegion]
endPoint := request.InputParams[api.OptCredEndpoint]
accessKey := request.InputParams[api.OptCredAccessKey]
secret := request.InputParams[api.OptCredSecretKey]
			if region == "" || endPoint == "" || accessKey == "" || secret == "" {
http.Error(w, "No input provided", http.StatusBadRequest)
return
}
}
if request.InputParams[api.OptCredType] == "google" {
projectID := request.InputParams[api.OptCredGoogleProjectID]
jsonKey := request.InputParams[api.OptCredGoogleJsonKey]
if projectID == "" || jsonKey == "" {
http.Error(w, "No input provided", http.StatusBadRequest)
return
}
}
if request.InputParams[api.OptCredType] == "azure" {
accName := request.InputParams[api.OptCredAzureAccountName]
accessKey := request.InputParams[api.OptCredAzureAccountKey]
if accName == "" || accessKey == "" {
http.Error(w, "No input provided", http.StatusBadRequest)
return
}
}
json.NewEncoder(w).Encode(response)
}))
defer ts.Close()
clnt, err := NewDriverClient(ts.URL, "pxd", "", "")
require.NoError(t, err)
input := make(map[string]string, 0)
_, err = VolumeDriver(clnt).CredsCreate(input)
require.Error(t, err)
input[api.OptCredType] = "s3"
_, err = VolumeDriver(clnt).CredsCreate(input)
require.Error(t, err)
input[api.OptCredRegion] = "abc"
input[api.OptCredEndpoint] = "http.xy.abc.bz.com"
_, err = VolumeDriver(clnt).CredsCreate(input)
require.Error(t, err)
input[api.OptCredAccessKey] = "myaccessley"
input[api.OptCredSecretKey] = "OptCredSecretKey"
_, err = VolumeDriver(clnt).CredsCreate(input)
require.NoError(t, err)
input[api.OptCredType] = "google"
_, err = VolumeDriver(clnt).CredsCreate(input)
require.Error(t, err)
input[api.OptCredGoogleJsonKey] = "abc"
input[api.OptCredGoogleProjectID] = "defgh34ijk"
_, err = VolumeDriver(clnt).CredsCreate(input)
require.NoError(t, err)
input[api.OptCredType] = "azure"
_, err = VolumeDriver(clnt).CredsCreate(input)
require.Error(t, err)
input[api.OptCredAzureAccountName] = "abc"
input[api.OptCredAzureAccountKey] = "defgh34ijk"
_, err = VolumeDriver(clnt).CredsCreate(input)
require.NoError(t, err)
}
func TestClientCredsList(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
response := make(map[string]interface{}, 0)
json.NewEncoder(w).Encode(response)
}))
defer ts.Close()
clnt, err := NewDriverClient(ts.URL, "pxd", "", "")
require.NoError(t, err)
_, err = VolumeDriver(clnt).CredsEnumerate()
require.NoError(t, err)
}
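
A small sketch (not in the file above) of building a fresh parameter map per provider, instead of mutating one shared map across sub-cases as TestClientCredCreate does; the option key comes from the api package already imported.

// credInput returns a new input map seeded with the provider type plus any
// extra provider-specific options.
func credInput(provider string, extra map[string]string) map[string]string {
	in := map[string]string{api.OptCredType: provider}
	for k, v := range extra {
		in[k] = v
	}
	return in
}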
| 1 | 6,420 | Remove this file from the PR | libopenstorage-openstorage | go |
@@ -488,7 +488,7 @@ Blockly.Variables.renameVariable = function(workspace, variable,
var validate = Blockly.Variables.nameValidator_.bind(null, varType);
var promptText = promptMsg.replace('%1', variable.name);
- Blockly.prompt(promptText, '',
+ Blockly.prompt(promptText, variable.name,
function(newName, additionalVars) {
if (variable.isCloud &&
newName.length > 0 && newName.indexOf(Blockly.Variables.CLOUD_PREFIX) == 0 ) { | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Utility functions for handling variables.
* @author [email protected] (Neil Fraser)
*/
'use strict';
/**
* @name Blockly.Variables
* @namespace
**/
goog.provide('Blockly.Variables');
goog.require('Blockly.Blocks');
goog.require('Blockly.constants');
goog.require('Blockly.VariableModel');
goog.require('Blockly.Workspace');
goog.require('goog.string');
/**
* Constant to separate variable names from procedures and generated functions
* when running generators.
* @deprecated Use Blockly.VARIABLE_CATEGORY_NAME
*/
Blockly.Variables.NAME_TYPE = Blockly.VARIABLE_CATEGORY_NAME;
/**
* Constant prefix to differentiate cloud variable names from other types
* of variables.
* This is the \u2601 cloud unicode character followed by a space.
* @type {string}
* @package
*/
Blockly.Variables.CLOUD_PREFIX = '☁ ';
/**
* Find all user-created variables that are in use in the workspace.
* For use by generators.
* @param {!Blockly.Block|!Blockly.Workspace} root Root block or workspace.
* @return {!Array.<string>} Array of variable names.
*/
Blockly.Variables.allUsedVariables = function(root) {
var blocks;
if (root instanceof Blockly.Block) {
// Root is Block.
blocks = root.getDescendants(false);
} else if (root instanceof Blockly.Workspace ||
root instanceof Blockly.WorkspaceSvg) {
// Root is Workspace.
blocks = root.getAllBlocks();
} else {
throw 'Not Block or Workspace: ' + root;
}
var ignorableName = Blockly.Variables.noVariableText();
var variableHash = Object.create(null);
// Iterate through every block and add each variable to the hash.
for (var x = 0; x < blocks.length; x++) {
var blockVariables = blocks[x].getVarModels();
if (blockVariables) {
for (var y = 0; y < blockVariables.length; y++) {
var variable = blockVariables[y];
// Variable ID may be null if the block is only half-built.
if (variable.getId() && variable.name.toLowerCase() != ignorableName) {
variableHash[variable.name.toLowerCase()] = variable.name;
}
}
}
}
// Flatten the hash into a list.
var variableList = [];
for (var name in variableHash) {
variableList.push(variableHash[name]);
}
return variableList;
};
/**
* Find all variables that the user has created through the workspace or
* toolbox. For use by generators.
* @param {!Blockly.Workspace} root The workspace to inspect.
* @return {!Array.<Blockly.VariableModel>} Array of variable models.
*/
Blockly.Variables.allVariables = function(root) {
if (root instanceof Blockly.Block) {
// Root is Block.
console.warn('Deprecated call to Blockly.Variables.allVariables ' +
'with a block instead of a workspace. You may want ' +
'Blockly.Variables.allUsedVariables');
return {};
}
return root.getAllVariables();
};
/**
* Find all developer variables used by blocks in the workspace.
* Developer variables are never shown to the user, but are declared as global
* variables in the generated code.
* To declare developer variables, define the getDeveloperVariables function on
* your block and return a list of variable names.
* For use by generators.
* @param {!Blockly.Workspace} workspace The workspace to search.
* @return {!Array.<string>} A list of non-duplicated variable names.
* @package
*/
Blockly.Variables.allDeveloperVariables = function(workspace) {
var blocks = workspace.getAllBlocks();
var hash = {};
for (var i = 0; i < blocks.length; i++) {
var block = blocks[i];
if (block.getDeveloperVars) {
var devVars = block.getDeveloperVars();
for (var j = 0; j < devVars.length; j++) {
hash[devVars[j]] = devVars[j];
}
}
}
// Flatten the hash into a list.
var list = [];
for (var name in hash) {
list.push(hash[name]);
}
return list;
};
/**
* Return the text that should be used in a field_variable or
* field_variable_getter when no variable exists.
* TODO: #572
* @return {string} The text to display.
*/
Blockly.Variables.noVariableText = function() {
return "No variable selected";
};
/**
* Return a new variable name that is not yet being used. This will try to
* generate single letter variable names in the range 'i' to 'z' to start with.
* If no unique name is located it will try 'i' to 'z', 'a' to 'h',
* then 'i2' to 'z2' etc. Skip 'l'.
* @param {!Blockly.Workspace} workspace The workspace to be unique in.
* @return {string} New variable name.
*/
Blockly.Variables.generateUniqueName = function(workspace) {
var variableList = workspace.getAllVariables();
var newName = '';
if (variableList.length) {
var nameSuffix = 1;
var letters = 'ijkmnopqrstuvwxyzabcdefgh'; // No 'l'.
var letterIndex = 0;
var potName = letters.charAt(letterIndex);
while (!newName) {
var inUse = false;
for (var i = 0; i < variableList.length; i++) {
if (variableList[i].name.toLowerCase() == potName) {
// This potential name is already used.
inUse = true;
break;
}
}
if (inUse) {
// Try the next potential name.
letterIndex++;
if (letterIndex == letters.length) {
          // Reached the end of the character sequence, so wrap back to 'i'
          // with a new suffix.
letterIndex = 0;
nameSuffix++;
}
potName = letters.charAt(letterIndex);
if (nameSuffix > 1) {
potName += nameSuffix;
}
} else {
// We can use the current potential name.
newName = potName;
}
}
} else {
newName = 'i';
}
return newName;
};
/**
 * Remove any possibility of conflict/duplication between a real and potential variable.
* When creating a new variable, checks whether the desired name and type already exists
* as a real or potential variable.
* If 'checkReal' is true, checks whether a real variable with the given
* name and type already exists.
* Checks whether a potential variable (using the given 'potentialVarWs') exists.
* If a potential var exists and a real var also exists, discards the potential var
* and returns the real var.
* If a potential var exists and a real var does not exist (or 'checkReal'
* was false), creates the potential var as a real var,
* discards the potential var, and returns the newly created real var.
* If a potential var does not exist, returns null.
*
* @param {string} varName The name of the variable to check for.
* @param {string} varType The type of the variable to check for.
* @param {!Blockly.Workspace} potentialVarWs The workspace containing the
* potential variable map we want to check against.
* @param {boolean} checkReal Whether or not to check if a variable of the given
* name and type exists as a real variable.
* @return {?Blockly.VariableModel} The matching variable, if one already existed
* in the real workspace; the newly transformed variable, if one already
* existed as a potential variable. Null, if no matching variable, real or
* potential, was found.
*/
Blockly.Variables.realizePotentialVar = function(varName, varType, potentialVarWs,
checkReal) {
var potentialVarMap = potentialVarWs.getPotentialVariableMap();
var realWs = potentialVarWs.targetWorkspace;
if (!potentialVarMap) {
console.warn('Called Blockly.Variables.realizePotentialVar with incorrect ' +
'workspace. The provided workspace does not have a potential variable map.');
return;
}
// First check if a variable with the same name and type already exists as a
// real variable.
var realVar;
if (checkReal) {
realVar = Blockly.Variables.getVariable(realWs, null, varName, varType);
}
// Check if variable with same name and type exists as a potential var
var potentialVar = potentialVarMap.getVariable(varName, varType);
if (!potentialVar) {
return null;
}
// The potential var exists, so save its id and delete it from the potential
// variable map.
var id = potentialVar.getId();
potentialVarMap.deleteVariable(potentialVar);
// Depending on whether a real var already exists or not, either return the
// existing real var or turn the potential var into a new one using its id.
if (realVar) {
return realVar;
}
return realWs.createVariable(varName, varType, id);
};
/**
* Create a new variable on the given workspace.
* @param {!Blockly.Workspace} workspace The workspace on which to create the
* variable.
* @param {function(?string=)=} opt_callback An optional callback function to act
* on the id of the variable that is created from the user's input, or null
* if the change is to be aborted (cancel button or an invalid name was provided).
* @param {string} opt_type Optional type of the variable to be created,
* like 'string' or 'list'.
*/
Blockly.Variables.createVariable = function(workspace, opt_callback, opt_type) {
// Decide on a modal message based on the opt_type. If opt_type was not
// provided, default to the original message for scalar variables.
var newMsg, modalTitle;
if (opt_type == Blockly.BROADCAST_MESSAGE_VARIABLE_TYPE) {
newMsg = Blockly.Msg.NEW_BROADCAST_MESSAGE_TITLE;
modalTitle = Blockly.Msg.BROADCAST_MODAL_TITLE;
} else if (opt_type == Blockly.LIST_VARIABLE_TYPE) {
newMsg = Blockly.Msg.NEW_LIST_TITLE;
modalTitle = Blockly.Msg.LIST_MODAL_TITLE;
} else {
// Note: this case covers 1) scalar variables, 2) any new type of
// variable not explicitly checked for above, and 3) a null or undefined
// opt_type -- turns a falsey opt_type into ''
// TODO (#1251) Warn developers that they didn't provide an opt_type/provided
// a falsey opt_type
opt_type = opt_type ? opt_type : '';
newMsg = Blockly.Msg.NEW_VARIABLE_TITLE;
modalTitle = Blockly.Msg.VARIABLE_MODAL_TITLE;
}
var validate = Blockly.Variables.nameValidator_.bind(null, opt_type);
// Prompt the user to enter a name for the variable
Blockly.prompt(newMsg, '',
function(text, additionalVars, variableOptions) {
variableOptions = variableOptions || {};
var scope = variableOptions.scope;
var isLocal = (scope === 'local') || false;
var isCloud = variableOptions.isCloud || false;
// Default to [] if additionalVars is not provided
additionalVars = additionalVars || [];
// Only use additionalVars for global variable creation.
var additionalVarNames = isLocal ? [] : additionalVars;
var validatedText = validate(text, workspace, additionalVarNames, isCloud, opt_callback);
if (validatedText) {
// The name is valid according to the type, create the variable
var potentialVarMap = workspace.getPotentialVariableMap();
var variable;
// This check ensures that if a new variable is being created from a
// workspace that already has a variable of the same name and type as
// a potential variable, that potential variable gets turned into a
// real variable and thus there aren't duplicate options in the field_variable
// dropdown.
if (potentialVarMap && opt_type) {
variable = Blockly.Variables.realizePotentialVar(validatedText,
opt_type, workspace, false);
}
if (!variable) {
variable = workspace.createVariable(validatedText, opt_type, null, isLocal, isCloud);
}
var flyout = workspace.isFlyout ? workspace : workspace.getFlyout();
var variableBlockId = variable.getId();
if (flyout.setCheckboxState) {
flyout.setCheckboxState(variableBlockId, true);
}
if (opt_callback) {
opt_callback(variableBlockId);
}
} else {
// User canceled prompt without a value.
if (opt_callback) {
opt_callback(null);
}
}
}, modalTitle, opt_type);
};
/**
* This function provides a common interface for variable name validation agnostic
* of type. This is so that functions like Blockly.Variables.createVariable and
* Blockly.Variables.renameVariable can call a single function (with a single
* type signature) to validate the user-provided name for a variable.
* @param {string} type The type of the variable for which the provided name
* should be validated.
* @param {string} text The user-provided text that should be validated as a
* variable name.
* @param {!Blockly.Workspace} workspace The workspace on which to validate the
* variable name. This is the workspace used to check whether the variable
* already exists.
* @param {Array<string>} additionalVars A list of additional var names to check
* for conflicts against.
* @param {boolean} isCloud Whether the variable is a cloud variable.
* @param {function(?string=)=} opt_callback An optional function to be called on
* a pre-existing variable of the user-provided name. This function is currently
* only used for broadcast messages.
* @return {string} The validated name according to the parameters given, if
* the name is determined to be valid, or null if the name
* is determined to be invalid/in-use, and the calling function should not
* proceed with creating or renaming the variable.
* @private
*/
Blockly.Variables.nameValidator_ = function(type, text, workspace, additionalVars,
isCloud, opt_callback) {
// The validators for the different variable types require slightly different arguments.
// For broadcast messages, if a broadcast message of the provided name already exists,
// the validator needs to call a function that updates the selected
// field option of the dropdown menu of the block that was used to create the new message.
// For scalar variables and lists, the validator has the same validation behavior, but needs
// to know which type of variable to check for and needs a type-specific error message
// that is displayed when a variable of the given name and type already exists.
if (type == Blockly.BROADCAST_MESSAGE_VARIABLE_TYPE) {
return Blockly.Variables.validateBroadcastMessageName_(text, workspace, opt_callback);
} else if (type == Blockly.LIST_VARIABLE_TYPE) {
return Blockly.Variables.validateScalarVarOrListName_(text, workspace, additionalVars, false, type,
Blockly.Msg.LIST_ALREADY_EXISTS);
} else {
return Blockly.Variables.validateScalarVarOrListName_(text, workspace, additionalVars, isCloud, type,
Blockly.Msg.VARIABLE_ALREADY_EXISTS);
}
};
/**
* Validate the given name as a broadcast message type.
* @param {string} name The name to validate
* @param {!Blockly.Workspace} workspace The workspace the name should be validated
* against.
* @param {function(?string=)=} opt_callback An optional function to call if a broadcast
* message already exists with the given name. This function will be called on the id
* of the existing variable.
* @return {string} The validated name, or null if invalid.
* @private
*/
Blockly.Variables.validateBroadcastMessageName_ = function(name, workspace, opt_callback) {
if (!name) { // no name was provided or the user cancelled the prompt
return null;
}
var variable = workspace.getVariable(name, Blockly.BROADCAST_MESSAGE_VARIABLE_TYPE);
if (variable) {
// If the user provided a name for a broadcast message that already exists,
// use the provided callback function to update the selected option in
// the field of the block that was used to create
// this message.
if (opt_callback) {
opt_callback(variable.getId());
}
// Return null to signal to the calling function that we do not want to create
// a new variable since one already exists.
return null;
} else {
// The name provided is actually a new name, so the calling
// function should go ahead and create it as a new variable.
return name;
}
};
/**
* Validate the given name as a scalar variable or list type.
* This function is also responsible for any user facing error-handling.
* @param {string} name The name to validate
* @param {!Blockly.Workspace} workspace The workspace the name should be validated
* against.
* @param {Array<string>} additionalVars A list of additional variable names to check
* for conflicts against.
* @param {boolean} isCloud Whether the variable is a cloud variable.
* @param {string} type The type to validate the variable as. This should be one of
* Blockly.SCALAR_VARIABLE_TYPE or Blockly.LIST_VARIABLE_TYPE.
* @param {string} errorMsg The type-specific error message the user should see
* if a variable of the validated, given name and type already exists.
* @return {string} The validated name, or null if invalid.
* @private
*/
Blockly.Variables.validateScalarVarOrListName_ = function(name, workspace, additionalVars,
isCloud, type, errorMsg) {
// For scalar variables, we don't want leading or trailing white space
name = Blockly.Variables.trimName_(name);
if (!name) {
return null;
}
if (isCloud) {
name = Blockly.Variables.CLOUD_PREFIX + name;
}
if (workspace.getVariable(name, type) || additionalVars.indexOf(name) >= 0) {
// error
Blockly.alert(errorMsg.replace('%1', name));
return null;
} else { // trimmed name is valid
return name;
}
};
/**
* Rename a variable with the given workspace, variableType, and oldName.
* @param {!Blockly.Workspace} workspace The workspace on which to rename the
* variable.
* @param {Blockly.VariableModel} variable Variable to rename.
* @param {function(?string=)=} opt_callback A callback. It will
* be passed an acceptable new variable name, or null if change is to be
* aborted (cancel button), or undefined if an existing variable was chosen.
*/
Blockly.Variables.renameVariable = function(workspace, variable,
opt_callback) {
// Validation and modal message/title depends on the variable type
var promptMsg, modalTitle;
var varType = variable.type;
if (varType == Blockly.BROADCAST_MESSAGE_VARIABLE_TYPE) {
console.warn('Unexpected attempt to rename a broadcast message with ' +
'id: ' + variable.getId() + ' and name: ' + variable.name);
return;
}
if (varType == Blockly.LIST_VARIABLE_TYPE) {
promptMsg = Blockly.Msg.RENAME_LIST_TITLE;
modalTitle = Blockly.Msg.RENAME_LIST_MODAL_TITLE;
} else {
// Default for all other types of variables
promptMsg = Blockly.Msg.RENAME_VARIABLE_TITLE;
modalTitle = Blockly.Msg.RENAME_VARIABLE_MODAL_TITLE;
}
var validate = Blockly.Variables.nameValidator_.bind(null, varType);
var promptText = promptMsg.replace('%1', variable.name);
Blockly.prompt(promptText, '',
function(newName, additionalVars) {
if (variable.isCloud &&
newName.length > 0 && newName.indexOf(Blockly.Variables.CLOUD_PREFIX) == 0 ) {
newName = newName.substring(2); // The name validator will add the prefix back
}
additionalVars = additionalVars || [];
var additionalVarNames = variable.isLocal ? [] : additionalVars;
var validatedText = validate(newName, workspace, additionalVarNames, variable.isCloud);
if (validatedText) {
workspace.renameVariableById(variable.getId(), validatedText);
if (opt_callback) {
opt_callback(newName);
}
} else {
// User canceled prompt without a value.
if (opt_callback) {
opt_callback(null);
}
}
}, modalTitle, varType);
};
/**
* Strip leading and trailing whitespace from the given name, for use with
* user provided name for scalar variables and lists.
* @param {string} name The user-provided name of the variable.
* @return {string} The trimmed name, or whatever falsey value was originally provided.
*/
Blockly.Variables.trimName_ = function(name) {
if (name) {
return goog.string.trim(name);
} else {
// Return whatever was provided
return name;
}
};
/**
* Generate XML string for variable field.
* @param {!Blockly.VariableModel} variableModel The variable model to generate
* an XML string from.
* @param {?string} opt_name The optional name of the field, such as "VARIABLE"
* or "LIST". Defaults to "VARIABLE".
* @return {string} The generated XML.
* @private
*/
Blockly.Variables.generateVariableFieldXml_ = function(variableModel, opt_name) {
// The variable name may be user input, so it may contain characters that need
// to be escaped to create valid XML.
var typeString = variableModel.type;
if (typeString == '') {
typeString = '\'\'';
}
var fieldName = opt_name || 'VARIABLE';
var text = '<field name="' + fieldName + '" id="' + variableModel.getId() +
'" variabletype="' + goog.string.htmlEscape(typeString) +
'">' + goog.string.htmlEscape(variableModel.name) + '</field>';
return text;
};
/**
* Helper function to look up or create a variable on the given workspace.
* If no variable exists, creates and returns it.
* @param {!Blockly.Workspace} workspace The workspace to search for the
* variable. It may be a flyout workspace or main workspace.
* @param {string} id The ID to use to look up or create the variable, or null.
* @param {string=} opt_name The string to use to look up or create the
* variable.
* @param {string=} opt_type The type to use to look up or create the variable.
* @return {!Blockly.VariableModel} The variable corresponding to the given ID
* or name + type combination.
* @package
*/
Blockly.Variables.getOrCreateVariablePackage = function(workspace, id, opt_name,
opt_type) {
var variable = Blockly.Variables.getVariable(workspace, id, opt_name,
opt_type);
if (!variable) {
variable = Blockly.Variables.createVariable_(workspace, id, opt_name,
opt_type);
}
return variable;
};
/**
* Look up a variable on the given workspace.
* Always looks in the main workspace before looking in the flyout workspace.
* Always prefers lookup by ID to lookup by name + type.
* @param {!Blockly.Workspace} workspace The workspace to search for the
* variable. It may be a flyout workspace or main workspace.
* @param {string} id The ID to use to look up the variable, or null.
* @param {string=} opt_name The string to use to look up the variable. Only
* used if lookup by ID fails.
* @param {string=} opt_type The type to use to look up the variable. Only used
* if lookup by ID fails.
* @return {?Blockly.VariableModel} The variable corresponding to the given ID
* or name + type combination, or null if not found.
* @package
*/
Blockly.Variables.getVariable = function(workspace, id, opt_name, opt_type) {
var potentialVariableMap = workspace.getPotentialVariableMap();
// Try to just get the variable, by ID if possible.
if (id) {
// Look in the real variable map before checking the potential variable map.
var variable = workspace.getVariableById(id);
if (!variable && potentialVariableMap) {
variable = potentialVariableMap.getVariableById(id);
}
} else if (opt_name) {
if (opt_type == undefined) {
throw new Error('Tried to look up a variable by name without a type');
}
// Otherwise look up by name and type.
var variable = workspace.getVariable(opt_name, opt_type);
if (!variable && potentialVariableMap) {
variable = potentialVariableMap.getVariable(opt_name, opt_type);
}
}
return variable;
};
/**
* Helper function to create a variable on the given workspace.
* @param {!Blockly.Workspace} workspace The workspace in which to create the
* variable. It may be a flyout workspace or main workspace.
* @param {string} id The ID to use to create the variable, or null.
* @param {string=} opt_name The string to use to create the variable.
* @param {string=} opt_type The type to use to create the variable.
* @return {!Blockly.VariableModel} The variable corresponding to the given ID
* or name + type combination.
* @private
*/
Blockly.Variables.createVariable_ = function(workspace, id, opt_name,
opt_type) {
var potentialVariableMap = workspace.getPotentialVariableMap();
// Variables without names get uniquely named for this workspace.
if (!opt_name) {
var ws = workspace.isFlyout ? workspace.targetWorkspace : workspace;
opt_name = Blockly.Variables.generateUniqueName(ws);
}
// Create a potential variable if in the flyout.
if (potentialVariableMap) {
var variable = potentialVariableMap.createVariable(opt_name, opt_type, id);
} else { // In the main workspace, create a real variable.
var variable = workspace.createVariable(opt_name, opt_type, id);
}
return variable;
};
/**
* Helper function to get the list of variables that have been added to the
* workspace after adding a new block, using the given list of variables that
* were in the workspace before the new block was added.
* @param {!Blockly.Workspace} workspace The workspace to inspect.
* @param {!Array.<!Blockly.VariableModel>} originalVariables The array of
* variables that existed in the workspace before adding the new block.
* @return {!Array.<!Blockly.VariableModel>} The new array of variables that were
* freshly added to the workspace after creating the new block, or [] if no
* new variables were added to the workspace.
* @package
*/
Blockly.Variables.getAddedVariables = function(workspace, originalVariables) {
var allCurrentVariables = workspace.getAllVariables();
var addedVariables = [];
if (originalVariables.length != allCurrentVariables.length) {
for (var i = 0; i < allCurrentVariables.length; i++) {
var variable = allCurrentVariables[i];
// For any variable that is present in allCurrentVariables but not
// present in originalVariables, add the variable to addedVariables.
if (!originalVariables.includes(variable)) {
addedVariables.push(variable);
}
}
}
return addedVariables;
};
| 1 | 9,861 | I think that for cloud variables, the default value in the prompt should be the variable name without the cloud prefix; this is slightly different from the scratch 2.0 behavior, but I think would ultimately lead to less confusion. Proposing code changes below: | LLK-scratch-blocks | js |
@@ -16,7 +16,8 @@ struct wlr_pointer *create_libinput_pointer(
wlr_log(WLR_ERROR, "Unable to allocate wlr_pointer");
return NULL;
}
- wlr_pointer_init(wlr_pointer, NULL);
+ wlr_pointer_init(wlr_pointer, NULL,
+ libinput_device_config_tap_get_finger_count(libinput_dev) > 0);
return wlr_pointer;
}
| 1 | #include <assert.h>
#include <libinput.h>
#include <stdlib.h>
#include <wlr/backend/session.h>
#include <wlr/interfaces/wlr_pointer.h>
#include <wlr/types/wlr_input_device.h>
#include <wlr/util/log.h>
#include "backend/libinput.h"
#include "util/signal.h"
struct wlr_pointer *create_libinput_pointer(
struct libinput_device *libinput_dev) {
assert(libinput_dev);
struct wlr_pointer *wlr_pointer = calloc(1, sizeof(struct wlr_pointer));
if (!wlr_pointer) {
wlr_log(WLR_ERROR, "Unable to allocate wlr_pointer");
return NULL;
}
wlr_pointer_init(wlr_pointer, NULL);
return wlr_pointer;
}
void handle_pointer_motion(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer event for a device with no pointers?");
return;
}
struct libinput_event_pointer *pevent =
libinput_event_get_pointer_event(event);
struct wlr_event_pointer_motion wlr_event = { 0 };
wlr_event.device = wlr_dev;
wlr_event.time_msec =
usec_to_msec(libinput_event_pointer_get_time_usec(pevent));
wlr_event.delta_x = libinput_event_pointer_get_dx(pevent);
wlr_event.delta_y = libinput_event_pointer_get_dy(pevent);
wlr_event.unaccel_dx = libinput_event_pointer_get_dx_unaccelerated(pevent);
wlr_event.unaccel_dy = libinput_event_pointer_get_dy_unaccelerated(pevent);
wlr_signal_emit_safe(&wlr_dev->pointer->events.motion, &wlr_event);
wlr_signal_emit_safe(&wlr_dev->pointer->events.frame, wlr_dev->pointer);
}
void handle_pointer_motion_abs(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer event for a device with no pointers?");
return;
}
struct libinput_event_pointer *pevent =
libinput_event_get_pointer_event(event);
struct wlr_event_pointer_motion_absolute wlr_event = { 0 };
wlr_event.device = wlr_dev;
wlr_event.time_msec =
usec_to_msec(libinput_event_pointer_get_time_usec(pevent));
wlr_event.x = libinput_event_pointer_get_absolute_x_transformed(pevent, 1);
wlr_event.y = libinput_event_pointer_get_absolute_y_transformed(pevent, 1);
wlr_signal_emit_safe(&wlr_dev->pointer->events.motion_absolute, &wlr_event);
wlr_signal_emit_safe(&wlr_dev->pointer->events.frame, wlr_dev->pointer);
}
void handle_pointer_button(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer event for a device with no pointers?");
return;
}
struct libinput_event_pointer *pevent =
libinput_event_get_pointer_event(event);
struct wlr_event_pointer_button wlr_event = { 0 };
wlr_event.device = wlr_dev;
wlr_event.time_msec =
usec_to_msec(libinput_event_pointer_get_time_usec(pevent));
wlr_event.button = libinput_event_pointer_get_button(pevent);
switch (libinput_event_pointer_get_button_state(pevent)) {
case LIBINPUT_BUTTON_STATE_PRESSED:
wlr_event.state = WLR_BUTTON_PRESSED;
break;
case LIBINPUT_BUTTON_STATE_RELEASED:
wlr_event.state = WLR_BUTTON_RELEASED;
break;
}
wlr_signal_emit_safe(&wlr_dev->pointer->events.button, &wlr_event);
wlr_signal_emit_safe(&wlr_dev->pointer->events.frame, wlr_dev->pointer);
}
void handle_pointer_axis(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer event for a device with no pointers?");
return;
}
struct libinput_event_pointer *pevent =
libinput_event_get_pointer_event(event);
struct wlr_event_pointer_axis wlr_event = { 0 };
wlr_event.device = wlr_dev;
wlr_event.time_msec =
usec_to_msec(libinput_event_pointer_get_time_usec(pevent));
switch (libinput_event_pointer_get_axis_source(pevent)) {
case LIBINPUT_POINTER_AXIS_SOURCE_WHEEL:
wlr_event.source = WLR_AXIS_SOURCE_WHEEL;
break;
case LIBINPUT_POINTER_AXIS_SOURCE_FINGER:
wlr_event.source = WLR_AXIS_SOURCE_FINGER;
break;
case LIBINPUT_POINTER_AXIS_SOURCE_CONTINUOUS:
wlr_event.source = WLR_AXIS_SOURCE_CONTINUOUS;
break;
case LIBINPUT_POINTER_AXIS_SOURCE_WHEEL_TILT:
wlr_event.source = WLR_AXIS_SOURCE_WHEEL_TILT;
break;
}
const enum libinput_pointer_axis axes[] = {
LIBINPUT_POINTER_AXIS_SCROLL_VERTICAL,
LIBINPUT_POINTER_AXIS_SCROLL_HORIZONTAL,
};
for (size_t i = 0; i < sizeof(axes) / sizeof(axes[0]); ++i) {
if (libinput_event_pointer_has_axis(pevent, axes[i])) {
switch (axes[i]) {
case LIBINPUT_POINTER_AXIS_SCROLL_VERTICAL:
wlr_event.orientation = WLR_AXIS_ORIENTATION_VERTICAL;
break;
case LIBINPUT_POINTER_AXIS_SCROLL_HORIZONTAL:
wlr_event.orientation = WLR_AXIS_ORIENTATION_HORIZONTAL;
break;
}
wlr_event.delta =
libinput_event_pointer_get_axis_value(pevent, axes[i]);
wlr_event.delta_discrete =
libinput_event_pointer_get_axis_value_discrete(pevent, axes[i]);
wlr_signal_emit_safe(&wlr_dev->pointer->events.axis, &wlr_event);
}
}
wlr_signal_emit_safe(&wlr_dev->pointer->events.frame, wlr_dev->pointer);
}
void handle_pointer_swipe_begin(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer gesture event for a device with no pointers?");
return;
}
struct libinput_event_gesture *gevent =
libinput_event_get_gesture_event(event);
struct wlr_event_pointer_swipe_begin wlr_event = {
.device = wlr_dev,
.time_msec =
usec_to_msec(libinput_event_gesture_get_time_usec(gevent)),
.fingers = libinput_event_gesture_get_finger_count(gevent),
};
wlr_signal_emit_safe(&wlr_dev->pointer->events.swipe_begin, &wlr_event);
}
void handle_pointer_swipe_update(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer gesture event for a device with no pointers?");
return;
}
struct libinput_event_gesture *gevent =
libinput_event_get_gesture_event(event);
struct wlr_event_pointer_swipe_update wlr_event = {
.device = wlr_dev,
.time_msec =
usec_to_msec(libinput_event_gesture_get_time_usec(gevent)),
.fingers = libinput_event_gesture_get_finger_count(gevent),
.dx = libinput_event_gesture_get_dx(gevent),
.dy = libinput_event_gesture_get_dy(gevent),
};
wlr_signal_emit_safe(&wlr_dev->pointer->events.swipe_update, &wlr_event);
}
void handle_pointer_swipe_end(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer gesture event for a device with no pointers?");
return;
}
struct libinput_event_gesture *gevent =
libinput_event_get_gesture_event(event);
struct wlr_event_pointer_swipe_end wlr_event = {
.device = wlr_dev,
.time_msec =
usec_to_msec(libinput_event_gesture_get_time_usec(gevent)),
.cancelled = libinput_event_gesture_get_cancelled(gevent),
};
wlr_signal_emit_safe(&wlr_dev->pointer->events.swipe_end, &wlr_event);
}
void handle_pointer_pinch_begin(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer gesture event for a device with no pointers?");
return;
}
struct libinput_event_gesture *gevent =
libinput_event_get_gesture_event(event);
struct wlr_event_pointer_pinch_begin wlr_event = {
.device = wlr_dev,
.time_msec =
usec_to_msec(libinput_event_gesture_get_time_usec(gevent)),
.fingers = libinput_event_gesture_get_finger_count(gevent),
};
wlr_signal_emit_safe(&wlr_dev->pointer->events.pinch_begin, &wlr_event);
}
void handle_pointer_pinch_update(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer gesture event for a device with no pointers?");
return;
}
struct libinput_event_gesture *gevent =
libinput_event_get_gesture_event(event);
struct wlr_event_pointer_pinch_update wlr_event = {
.device = wlr_dev,
.time_msec =
usec_to_msec(libinput_event_gesture_get_time_usec(gevent)),
.fingers = libinput_event_gesture_get_finger_count(gevent),
.dx = libinput_event_gesture_get_dx(gevent),
.dy = libinput_event_gesture_get_dy(gevent),
.scale = libinput_event_gesture_get_scale(gevent),
.rotation = libinput_event_gesture_get_angle_delta(gevent),
};
wlr_signal_emit_safe(&wlr_dev->pointer->events.pinch_update, &wlr_event);
}
void handle_pointer_pinch_end(struct libinput_event *event,
struct libinput_device *libinput_dev) {
struct wlr_input_device *wlr_dev =
get_appropriate_device(WLR_INPUT_DEVICE_POINTER, libinput_dev);
if (!wlr_dev) {
wlr_log(WLR_DEBUG, "Got a pointer gesture event for a device with no pointers?");
return;
}
struct libinput_event_gesture *gevent =
libinput_event_get_gesture_event(event);
struct wlr_event_pointer_pinch_end wlr_event = {
.device = wlr_dev,
.time_msec =
usec_to_msec(libinput_event_gesture_get_time_usec(gevent)),
.cancelled = libinput_event_gesture_get_cancelled(gevent),
};
wlr_signal_emit_safe(&wlr_dev->pointer->events.pinch_end, &wlr_event);
}
| 1 | 13,645 | I think I would rather fish this interface through than use it as the basis for heuristics. | swaywm-wlroots | c |
@@ -153,7 +153,7 @@ namespace pwiz.Skyline.Controls.Graphs
public static float QValueCutoff
{
get => Properties.Settings.Default.DetectionsQValueCutoff;
- set => Properties.Settings.Default.DetectionsQValueCutoff = value;
+ set => Properties.Settings.Default.DetectionsQValueCutoff = value;
}
public static TargetType TargetType | 1 | /*
* Original author: Rita Chupalov <ritach .at. uw.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2020 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;
using System.Windows.Forms;
using pwiz.Common.Collections;
using pwiz.Common.Controls;
using pwiz.Common.SystemUtil;
using pwiz.Skyline.Model;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
namespace pwiz.Skyline.Controls.Graphs
{
public sealed class DetectionsGraphController : GraphSummary.IControllerSplit
{
public class IntLabeledValue : LabeledValues<int>
{
protected IntLabeledValue(int value, Func<string> getLabelFunc) : base(value, getLabelFunc)
{
Value = value;
}
public float Value { get; private set; }
public override string ToString()
{
if (this is TargetType)
return Label;
else
return string.Format(Label, Settings.TargetType);
}
public static IEnumerable<T> GetValues<T>() where T : IntLabeledValue
{
return (IEnumerable<T>)typeof(T).InvokeMember("GetValues", BindingFlags.InvokeMethod,
null, null, new object[0]);
}
public static T GetDefaultValue<T>() where T : IntLabeledValue
{
return (T)typeof(T).InvokeMember("GetDefaultValue", BindingFlags.InvokeMethod,
null, null, new object[0]);
}
public static T GetFromString<T>(string str) where T : IntLabeledValue
{
T res;
if (typeof(T) == typeof(TargetType))
res = GetValues<T>().FirstOrDefault(
(t) => t.Label.Equals(str));
else
res = GetValues<T>().FirstOrDefault(
(t) => string.Format(t.Label, Settings.TargetType).Equals(str));
if (res == default(T))
return GetDefaultValue<T>();
else return res;
}
public static void PopulateCombo<T>(ComboBox comboBox, T currentValue) where T : IntLabeledValue
{
comboBox.Items.Clear();
foreach (var val in GetValues<T>())
{
comboBox.Items.Add(val);
if (Equals(val, currentValue))
{
comboBox.SelectedIndex = comboBox.Items.Count - 1;
}
}
}
public static void PopulateCombo<T>(ToolStripComboBox comboBox, T currentValue) where T : IntLabeledValue
{
comboBox.Items.Clear();
foreach (var val in GetValues<T>())
{
comboBox.Items.Add(val);
if (Equals(val, currentValue))
{
comboBox.SelectedIndex = comboBox.Items.Count - 1;
}
}
}
public static T GetValue<T>(ComboBox comboBox, T defaultVal) where T : IntLabeledValue
{
return comboBox.SelectedItem as T ?? defaultVal;
}
public static T GetValue<T>(ToolStripComboBox comboBox, T defaultVal) where T : IntLabeledValue
{
return comboBox.SelectedItem as T ?? defaultVal;
}
}
public class TargetType : IntLabeledValue
{
private TargetType(int value, Func<string> getLabelFunc) : base(value, getLabelFunc) { }
public static readonly TargetType PRECURSOR = new TargetType(0, () => Resources.DetectionPlot_TargetType_Precursor);
public static readonly TargetType PEPTIDE = new TargetType(1, () => Resources.DetectionPlot_TargetType_Peptide);
public static IEnumerable<TargetType> GetValues()
{
return new[] { PRECURSOR, PEPTIDE };
}
public static TargetType GetDefaultValue()
{
return PRECURSOR;
}
}
public class YScaleFactorType : IntLabeledValue
{
private YScaleFactorType(int value, Func<string> getLabelFunc) : base(value, getLabelFunc) { }
public static readonly YScaleFactorType ONE = new YScaleFactorType(1, () => Resources.DetectionPlot_YScale_One);
public static readonly YScaleFactorType PERCENT = new YScaleFactorType(0, () => Resources.DetectionPlot_YScale_Percent);
public static IEnumerable<YScaleFactorType> GetValues()
{
return new[] { ONE, PERCENT };
}
public static YScaleFactorType GetDefaultValue()
{
return ONE;
}
}
public class Settings
{
public static float QValueCutoff
{
get => Properties.Settings.Default.DetectionsQValueCutoff;
set => Properties.Settings.Default.DetectionsQValueCutoff = value;
}
public static TargetType TargetType
{
get => IntLabeledValue.GetFromString<TargetType>(
Properties.Settings.Default.DetectionsTargetType);
set => Properties.Settings.Default.DetectionsTargetType = value.ToString();
}
public static YScaleFactorType YScaleFactor
{
get => IntLabeledValue.GetFromString<YScaleFactorType>(
Properties.Settings.Default.DetectionsYScaleFactor);
set => Properties.Settings.Default.DetectionsYScaleFactor = value.ToString();
}
public static int RepCount
{
get => Properties.Settings.Default.DetectionsRepCount;
set => Properties.Settings.Default.DetectionsRepCount = value;
}
public static float FontSize
{
get => Properties.Settings.Default.AreaFontSize;
set => Properties.Settings.Default.AreaFontSize = value;
}
public static bool ShowAtLeastN
{
get => Properties.Settings.Default.DetectionsShowAtLeastN;
set => Properties.Settings.Default.DetectionsShowAtLeastN = value;
}
// ReSharper disable once MemberHidesStaticFromOuterClass
public static bool ShowSelection
{
get => Properties.Settings.Default.DetectionsShowSelection;
set => Properties.Settings.Default.DetectionsShowSelection = value;
}
public static bool ShowMean
{
get => Properties.Settings.Default.DetectionsShowMean;
set => Properties.Settings.Default.DetectionsShowMean = value;
}
public static bool ShowLegend
{
get => Properties.Settings.Default.DetectionsShowLegend;
set => Properties.Settings.Default.DetectionsShowLegend = value;
}
}
private GraphSummary.IControllerSplit _controllerInterface;
public DetectionsGraphController()
{
_controllerInterface = this;
}
public static GraphTypeSummary GraphType
{
get { return Helpers.ParseEnum(Properties.Settings.Default.DetectionGraphType, GraphTypeSummary.invalid); }
set { Properties.Settings.Default.DetectionGraphType = value.ToString(); }
}
GraphSummary GraphSummary.IController.GraphSummary { get; set; }
UniqueList<GraphTypeSummary> GraphSummary.IController.GraphTypes
{
get => Properties.Settings.Default.DetectionGraphTypes;
set => Properties.Settings.Default.DetectionGraphTypes = value;
}
public IFormView FormView =>new GraphSummary.DetectionsGraphView();
string GraphSummary.IController.Text => Resources.SkylineWindow_CreateGraphDetections_Counts;
SummaryGraphPane GraphSummary.IControllerSplit.CreatePeptidePane(PaneKey key)
{
throw new NotImplementedException();
}
SummaryGraphPane GraphSummary.IControllerSplit.CreateReplicatePane(PaneKey key)
{
throw new NotImplementedException();
}
bool GraphSummary.IController.HandleKeyDownEvent(object sender, KeyEventArgs e)
{
if(e.KeyCode == Keys.Escape)
DetectionPlotData.GetDataCache().Cancel();
return true;
}
bool GraphSummary.IControllerSplit.IsPeptidePane(SummaryGraphPane pane) => false;
bool GraphSummary.IControllerSplit.IsReplicatePane(SummaryGraphPane pane) => false;
void GraphSummary.IController.OnActiveLibraryChanged()
{
(this as GraphSummary.IController).GraphSummary.UpdateUI();
}
void GraphSummary.IController.OnDocumentChanged(SrmDocument oldDocument, SrmDocument newDocument)
{
var settingsNew = newDocument.Settings;
var settingsOld = oldDocument.Settings;
if (_controllerInterface.GraphSummary.Type == GraphTypeSummary.detections ||
_controllerInterface.GraphSummary.Type == GraphTypeSummary.detections_histogram)
{
}
}
void GraphSummary.IController.OnRatioIndexChanged()
{
}
void GraphSummary.IController.OnResultsIndexChanged()
{
if (_controllerInterface.GraphSummary.GraphPanes.OfType<DetectionsByReplicatePane>().Any())
_controllerInterface.GraphSummary.UpdateUI();
}
void GraphSummary.IController.OnUpdateGraph()
{
var pane = _controllerInterface.GraphSummary.GraphPanes.FirstOrDefault();
switch (_controllerInterface.GraphSummary.Type)
{
case GraphTypeSummary.detections:
if (!(pane is DetectionsByReplicatePane))
_controllerInterface.GraphSummary.GraphPanes = new[]
{
new DetectionsByReplicatePane(_controllerInterface.GraphSummary),
};
break;
case GraphTypeSummary.detections_histogram:
if (!(pane is DetectionsHistogramPane))
_controllerInterface.GraphSummary.GraphPanes = new[]
{
new DetectionsHistogramPane(_controllerInterface.GraphSummary)
};
break;
}
if (!ReferenceEquals(_controllerInterface.GraphSummary.GraphPanes.FirstOrDefault(), pane))
(pane as IDisposable)?.Dispose();
}
}
}
| 1 | 13,748 | Probably not worth making this line-ending change, since it is the only change to this file. | ProteoWizard-pwiz | .cs |
@@ -1180,8 +1180,13 @@ encode_opnd_x0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
static inline bool
decode_opnd_memx0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
+#ifdef DR_HOST_NOT_TARGET
+ CLIENT_ASSERT(opnd_size_in_bytes(OPSZ_CACHE_LINE) == 64, "OPSZ_CACHE_LINE != 64");
+#endif
*opnd = opnd_create_base_disp(decode_reg(extract_uint(enc, 0, 5), true, false),
- DR_REG_NULL, 0, 0, OPSZ_sys);
+ DR_REG_NULL, 0, 0, OPSZ_CACHE_LINE);
+ if (!opnd_set_base_aligned(opnd, true))
+ return false;
return true;
}
| 1 | /* **********************************************************
* Copyright (c) 2017-2021 Google, Inc. All rights reserved.
* Copyright (c) 2016 ARM Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* AArch64 decoder and encoder functions.
* This file is rather large and should perhaps be split up, but there are many
* opportunities for inlining which could be lost if it were split into separate
* translation units, and it is helpful to have the per-operand-type decode/encode
* functions next to each other.
*/
#include <stdint.h>
#include "../globals.h"
#include "arch.h"
#include "decode.h"
#include "disassemble.h"
#include "instr.h"
#include "instr_create_shared.h"
#include "codec.h"
/* Decode immediate argument of bitwise operations.
* Returns zero if the encoding is invalid.
*/
static ptr_uint_t
decode_bitmask(uint enc)
{
uint pos = enc >> 6 & 63;
uint len = enc & 63;
ptr_uint_t x;
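    /* Bit 12 is the 'N' bit: when set, the element size is 64 bits; otherwise
     * the element size is derived from the high bits of 'len' below.
     */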
if (TEST(1U << 12, enc)) {
if (len == 63)
return 0;
x = ((ptr_uint_t)1 << (len + 1)) - 1;
return x >> pos | x << 1 << (63 - pos);
} else {
uint i, t = 32;
while ((t & len) != 0)
t >>= 1;
if (t < 2)
return 0;
x = len & (t - 1);
if (x == t - 1)
return 0;
x = ((ptr_uint_t)1 << (x + 1)) - 1;
pos &= t - 1;
x = x >> pos | x << (t - pos);
for (i = 2; i < 64; i *= 2) {
if (t <= i)
x |= x << i;
}
return x;
}
}
/* Encode immediate argument of bitwise operations.
* Returns -1 if the value cannot be encoded.
*/
static int
encode_bitmask(ptr_uint_t x)
{
int neg, rep, pos, len;
neg = 0;
if ((x & 1) != 0)
neg = 1, x = ~x;
if (x == 0)
return -1;
if (x >> 2 == (x & (((ptr_uint_t)1 << (64 - 2)) - 1)))
rep = 2, x &= ((ptr_uint_t)1 << 2) - 1;
else if (x >> 4 == (x & (((ptr_uint_t)1 << (64 - 4)) - 1)))
rep = 4, x &= ((ptr_uint_t)1 << 4) - 1;
else if (x >> 8 == (x & (((ptr_uint_t)1 << (64 - 8)) - 1)))
rep = 8, x &= ((ptr_uint_t)1 << 8) - 1;
else if (x >> 16 == (x & (((ptr_uint_t)1 << (64 - 16)) - 1)))
rep = 16, x &= ((ptr_uint_t)1 << 16) - 1;
else if (x >> 32 == (x & (((ptr_uint_t)1 << (64 - 32)) - 1)))
rep = 32, x &= ((ptr_uint_t)1 << 32) - 1;
else
rep = 64;
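    /* Compute 'pos', the number of trailing zero bits in the pattern; x is
     * shifted right so that its lowest set bit lands at bit 0.
     */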
pos = 0;
(x & (((ptr_uint_t)1 << 32) - 1)) != 0 ? 0 : (x >>= 32, pos += 32);
(x & (((ptr_uint_t)1 << 16) - 1)) != 0 ? 0 : (x >>= 16, pos += 16);
(x & (((ptr_uint_t)1 << 8) - 1)) != 0 ? 0 : (x >>= 8, pos += 8);
(x & (((ptr_uint_t)1 << 4) - 1)) != 0 ? 0 : (x >>= 4, pos += 4);
(x & (((ptr_uint_t)1 << 2) - 1)) != 0 ? 0 : (x >>= 2, pos += 2);
(x & (((ptr_uint_t)1 << 1) - 1)) != 0 ? 0 : (x >>= 1, pos += 1);
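    /* Compute 'len', the length of the run of contiguous set bits now at the
     * bottom of x; any bits left over mean the pattern is not encodable.
     */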
len = 0;
(~x & (((ptr_uint_t)1 << 32) - 1)) != 0 ? 0 : (x >>= 32, len += 32);
(~x & (((ptr_uint_t)1 << 16) - 1)) != 0 ? 0 : (x >>= 16, len += 16);
(~x & (((ptr_uint_t)1 << 8) - 1)) != 0 ? 0 : (x >>= 8, len += 8);
(~x & (((ptr_uint_t)1 << 4) - 1)) != 0 ? 0 : (x >>= 4, len += 4);
(~x & (((ptr_uint_t)1 << 2) - 1)) != 0 ? 0 : (x >>= 2, len += 2);
(~x & (((ptr_uint_t)1 << 1) - 1)) != 0 ? 0 : (x >>= 1, len += 1);
if (x != 0)
return -1;
if (neg) {
pos = (pos + len) & (rep - 1);
len = rep - len;
}
return (0x1000 & rep << 6) | (((rep - 1) ^ 31) << 1 & 63) |
((rep - pos) & (rep - 1)) << 6 | (len - 1);
}
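/* Worked example for the two functions above: the 13-bit field N:immr:imms =
 * 0:000000:111100 (0x03c) selects a 2-bit element with one bit set, replicated
 * across the register, so decode_bitmask(0x03c) == 0x5555555555555555 and
 * encode_bitmask(0x5555555555555555) == 0x03c.
 */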
/* Extract signed integer from subfield of word. */
static inline ptr_int_t
extract_int(uint enc, int pos, int len)
{
uint u = ((enc >> pos & (((uint)1 << (len - 1)) - 1)) -
(enc >> pos & ((uint)1 << (len - 1))));
return u << 1 < u ? -(ptr_int_t)~u - 1 : u;
}
/* Extract unsigned integer from subfield of word. */
static inline ptr_uint_t
extract_uint(uint enc, int pos, int len)
{
    /* 'pos' is the index of the field's lowest bit; 'len' is the field width
     * and counts the bit at 'pos'. */
return enc >> pos & (((uint)1 << len) - 1);
}
/* Find the highest set bit in a subfield of a word. Returns whether any bit
 * was set; if so, its index relative to 'pos' is written to *highest_bit.
 */
static inline bool
highest_bit_set(uint enc, int pos, int len, int *highest_bit)
{
for (int i = pos + len - 1; i >= pos; i--) {
if (enc & (1 << i)) {
*highest_bit = i - pos;
return true;
}
}
return false;
}
static inline aarch64_reg_offset
get_reg_offset(reg_id_t reg)
{
if (reg >= DR_REG_Q0 && reg <= DR_REG_Q31)
return QUAD_REG;
else if (reg >= DR_REG_D0 && reg <= DR_REG_D31)
return DOUBLE_REG;
else if (reg >= DR_REG_S0 && reg <= DR_REG_S31)
return SINGLE_REG;
else if (reg >= DR_REG_H0 && reg <= DR_REG_H31)
return HALF_REG;
else if (reg >= DR_REG_B0 && reg <= DR_REG_B31)
return BYTE_REG;
else
return NOT_A_REG;
}
static inline bool
try_encode_int(OUT uint *bits, int len, int scale, ptr_int_t val)
{
/* If any of lowest 'scale' bits are set, or 'val' is out of range, fail. */
if (((ptr_uint_t)val & ((1U << scale) - 1)) != 0 ||
val < -((ptr_int_t)1 << (len + scale - 1)) ||
val >= (ptr_int_t)1 << (len + scale - 1))
return false;
*bits = (ptr_uint_t)val >> scale & ((1U << len) - 1);
return true;
}
static inline bool
try_encode_imm(OUT uint *imm, int bits, opnd_t opnd)
{
ptr_int_t value;
if (!opnd_is_immed_int(opnd))
return false;
value = opnd_get_immed_int(opnd);
if (!(0 <= value && value < (uint)1 << bits))
return false;
*imm = value;
return true;
}
static inline bool
encode_pc_off(OUT uint *poff, int bits, byte *pc, instr_t *instr, opnd_t opnd,
decode_info_t *di)
{
ptr_uint_t off, range;
ASSERT(0 < bits && bits <= 32);
if (opnd.kind == PC_kind)
off = opnd.value.pc - pc;
else if (opnd.kind == INSTR_kind)
off = (byte *)opnd_get_instr(opnd)->note - (byte *)instr->note;
else
return false;
range = (ptr_uint_t)1 << bits;
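    /* The instruction field stores off >> 2 in a signed field of 'bits' bits,
     * so a reachable target must be 4-byte aligned with
     * -(range << 1) <= off < (range << 1); the masked test below checks both.
     */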
if (!TEST(~((range - 1) << 2), off + (range << 1))) {
*poff = off >> 2 & (range - 1);
return true;
}
/* If !di->check_reachable we still require correct alignment. */
if (!di->check_reachable && ALIGNED(off, 4)) {
*poff = 0;
return true;
}
return false;
}
static inline opnd_t
decode_sysreg(uint imm15)
{
reg_t sysreg;
switch (imm15) {
case 0x5a10: sysreg = DR_REG_NZCV; break;
case 0x5a20: sysreg = DR_REG_FPCR; break;
case 0x5a21: sysreg = DR_REG_FPSR; break;
case 0x5e82: sysreg = DR_REG_TPIDR_EL0; break;
case 0x5e83: sysreg = DR_REG_TPIDRRO_EL0; break;
case 0x5f02: sysreg = DR_REG_CNTVCT_EL0; break;
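    /* Any other encoding is represented as a raw 15-bit immediate. */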
default: return opnd_create_immed_uint(imm15, OPSZ_2);
}
return opnd_create_reg(sysreg);
}
static inline bool
encode_sysreg(OUT uint *imm15, opnd_t opnd)
{
if (opnd_is_reg(opnd)) {
switch (opnd_get_reg(opnd)) {
case DR_REG_NZCV: *imm15 = 0x5a10; break;
case DR_REG_FPCR: *imm15 = 0x5a20; break;
case DR_REG_FPSR: *imm15 = 0x5a21; break;
case DR_REG_TPIDR_EL0: *imm15 = 0x5e82; break;
case DR_REG_TPIDRRO_EL0: *imm15 = 0x5e83; break;
case DR_REG_CNTVCT_EL0: *imm15 = 0x5f02; break;
default: return false;
}
return true;
}
if (opnd_is_immed_int(opnd)) {
uint imm;
if (try_encode_imm(&imm, 15, opnd) && !opnd_is_reg(decode_sysreg(imm))) {
*imm15 = imm;
return true;
}
return false;
}
return false;
}
/* Decode integer register. Input 'n' is number from 0 to 31, where
* 31 can mean stack pointer or zero register, depending on 'is_sp'.
*/
static inline reg_id_t
decode_reg(uint n, bool is_x, bool is_sp)
{
return (n < 31 ? (is_x ? DR_REG_X0 : DR_REG_W0) + n
: is_sp ? (is_x ? DR_REG_XSP : DR_REG_WSP)
: (is_x ? DR_REG_XZR : DR_REG_WZR));
}
/* Encode integer register. */
static inline bool
encode_reg(OUT uint *num, OUT bool *is_x, reg_id_t reg, bool is_sp)
{
if (DR_REG_X0 <= reg && reg <= DR_REG_X30) {
*num = reg - DR_REG_X0;
*is_x = true;
return true;
}
if (DR_REG_W0 <= reg && reg <= DR_REG_W30) {
*num = reg - DR_REG_W0;
*is_x = false;
return true;
}
if (is_sp && (reg == DR_REG_XSP || reg == DR_REG_WSP)) {
*num = 31;
*is_x = (reg == DR_REG_XSP);
return true;
}
if (!is_sp && (reg == DR_REG_XZR || reg == DR_REG_WZR)) {
*num = 31;
*is_x = (reg == DR_REG_XZR);
return true;
}
return false;
}
/* Decode SIMD/FP register. */
static inline opnd_t
decode_vreg(uint scale, uint n)
{
reg_id_t reg = DR_REG_NULL;
ASSERT(n < 32 && scale < 5);
switch (scale) {
case 0: reg = DR_REG_B0 + n; break;
case 1: reg = DR_REG_H0 + n; break;
case 2: reg = DR_REG_S0 + n; break;
case 3: reg = DR_REG_D0 + n; break;
case 4: reg = DR_REG_Q0 + n; break;
}
return opnd_create_reg(reg);
}
/* Encode SIMD/FP register. */
static inline bool
encode_vreg(INOUT opnd_size_t *x, OUT uint *r, opnd_t opnd)
{
reg_id_t reg;
opnd_size_t sz;
uint n;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
if ((uint)(reg - DR_REG_B0) < 32) {
n = reg - DR_REG_B0;
sz = OPSZ_1;
} else if ((uint)(reg - DR_REG_H0) < 32) {
n = reg - DR_REG_H0;
sz = OPSZ_2;
} else if ((uint)(reg - DR_REG_S0) < 32) {
n = reg - DR_REG_S0;
sz = OPSZ_4;
} else if ((uint)(reg - DR_REG_D0) < 32) {
n = reg - DR_REG_D0;
sz = OPSZ_8;
} else if ((uint)(reg - DR_REG_Q0) < 32) {
n = reg - DR_REG_Q0;
sz = OPSZ_16;
} else
return false;
if (*x == OPSZ_NA)
*x = sz;
else if (*x != sz)
return false;
*r = n;
return true;
}
static opnd_t
create_base_imm(uint enc, int disp, int bytes)
{
/* The base register number comes from bits 5 to 9. It may be SP. */
return opnd_create_base_disp(decode_reg(extract_uint(enc, 5, 5), true, true),
DR_REG_NULL, 0, disp, opnd_size_from_bytes(bytes));
}
static bool
is_base_imm(opnd_t opnd, OUT uint *regnum)
{
uint n;
bool is_x;
if (!opnd_is_base_disp(opnd) || opnd_get_index(opnd) != DR_REG_NULL ||
!encode_reg(&n, &is_x, opnd_get_base(opnd), true) || !is_x)
return false;
*regnum = n;
return true;
}
/* Used for mem7* operand types, which have a 7-bit offset and are used by
* load/store (pair) instructions. Returns the scale (log base 2 of number
* of bytes) of the memory argument, a function of bits 26, 30 and 31.
*/
static int
mem7_scale(uint enc)
{
return 2 +
(TEST(1U << 26, enc) ? extract_uint(enc, 30, 2) : extract_uint(enc, 31, 1));
}
/* Used for memlit operand type, used by load (literal). Returns the size
* of the memory operand, a function of bits 26, 30 and 31.
*/
static opnd_size_t
memlit_size(uint enc)
{
opnd_size_t size = OPSZ_0;
switch (extract_uint(enc, 30, 2)) {
case 0: size = OPSZ_4; break;
case 1: size = OPSZ_8; break;
case 2: size = TEST(1U << 26, enc) ? OPSZ_16 : OPSZ_4;
}
return size;
}
/* Returns the number of registers accessed by SIMD load structure and replicate,
* a function of bits 13 and 21.
*/
static int
memvr_regcount(uint enc)
{
return ((enc >> 13 & 1) << 1 | (enc >> 21 & 1)) + 1;
}
/* Used for memvs operand type, used by SIMD load/store single structure.
* Returns the number of bytes read or written, which is a function of
* bits 10, 11, 13, 14, 15 and 21.
*/
static int
memvs_size(uint enc)
{
int scale = extract_uint(enc, 14, 2);
/* Number of elements in structure, 1 to 4. */
int elems = memvr_regcount(enc);
int size = extract_uint(enc, 10, 2);
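    /* The combination of opcode '10x' (scale 2) and size '01' encodes a
     * doubleword (64-bit) element.
     */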
if (scale == 2 && size == 1)
scale = 3;
return elems * (1 << scale);
}
/* Returns the number of registers accessed by SIMD load/store multiple structures,
* a function of bits 12-15.
*/
static int
multistruct_regcount(uint enc)
{
switch (extract_uint(enc, 12, 4)) {
case 0: return 4;
case 2: return 4;
case 4: return 3;
case 6: return 3;
case 7: return 1;
case 8: return 2;
case 10: return 2;
}
ASSERT(false);
return 0;
}
/*******************************************************************************
* Pairs of functions for decoding and encoding a generalised type of operand.
*/
/* adr_page: used for adr, adrp */
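/* With scale 0 (ADR) the 21-bit immediate is a signed byte offset from the
 * instruction; with scale 12 (ADRP) it is a signed count of 4KiB pages
 * relative to the page containing the instruction.
 */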
static bool
decode_opnd_adr_page(int scale, uint enc, byte *pc, OUT opnd_t *opnd)
{
uint bits = (enc >> 3 & 0x1ffffc) | (enc >> 29 & 3);
byte *addr = ((byte *)((ptr_uint_t)pc >> scale << scale) +
extract_int(bits, 0, 21) * ((ptr_int_t)1 << scale));
*opnd = opnd_create_rel_addr(addr, OPSZ_0);
return true;
}
static bool
encode_opnd_adr_page(int scale, byte *pc, opnd_t opnd, OUT uint *enc_out, instr_t *instr,
decode_info_t *di)
{
ptr_int_t offset;
uint bits;
if (opnd_is_rel_addr(opnd)) {
offset = (ptr_int_t)opnd_get_addr(opnd) -
(ptr_int_t)((ptr_uint_t)pc >> scale << scale);
} else if (opnd_is_instr(opnd)) {
offset = (ptr_int_t)((byte *)opnd_get_instr(opnd)->note - (byte *)instr->note);
} else
return false;
if (try_encode_int(&bits, 21, scale, offset)) {
*enc_out = (bits & 3) << 29 | (bits & 0x1ffffc) << 3;
return true;
}
/* If !di->check_reachable we still require correct alignment. */
if (!di->check_reachable && ALIGNED(offset, 1ULL << scale)) {
*enc_out = 0;
return true;
}
return false;
}
/* dq_plus: used for dq0, dq5, dq16, dq0p1, dq0p2, dq0p3 */
static inline bool
decode_opnd_dq_plus(int add, int rpos, int qpos, uint enc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg((TEST(1U << qpos, enc) ? DR_REG_Q0 : DR_REG_D0) +
(extract_uint(enc, rpos, rpos + 5) + add) % 32);
return true;
}
static inline bool
encode_opnd_dq_plus(int add, int rpos, int qpos, opnd_t opnd, OUT uint *enc_out)
{
uint num;
bool q;
if (!opnd_is_reg(opnd))
return false;
q = (uint)(opnd_get_reg(opnd) - DR_REG_Q0) < 32;
num = opnd_get_reg(opnd) - (q ? DR_REG_Q0 : DR_REG_D0);
if (num >= 32)
return false;
*enc_out = ((num - add) % 32) << rpos | (uint)q << qpos;
return true;
}
/* index: used for opnd_index0, ..., opnd_index3 */
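/* The 4-bit element index is assembled from bit 30 (the high bit) and bits
 * 10-12; 'n' is log2 of the element size in bytes, so wider elements use
 * fewer index bits.
 */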
static bool
decode_opnd_index(int n, uint enc, OUT opnd_t *opnd)
{
uint bits = (enc >> 30 & 1) << 3 | (enc >> 10 & 7);
*opnd = opnd_create_immed_int(bits >> n, OPSZ_4b);
return true;
}
static bool
encode_opnd_index(int n, opnd_t opnd, OUT uint *enc_out)
{
ptr_int_t val;
uint bits;
if (!opnd_is_immed_int(opnd))
return false;
val = opnd_get_immed_int(opnd);
if (val < 0 || val >= 16 >> n)
return false;
bits = val << n;
*enc_out = (bits >> 3 & 1) << 30 | (bits & 7) << 10;
return true;
}
/* int: used for almost every operand type that is an immediate integer */
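/* Here 'pos' is the lowest bit of the field within the instruction word, 'len'
 * its width in bits, 'signd' selects sign extension, and the decoded immediate
 * is the field value shifted left by 'scale'.
 */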
static bool
decode_opnd_int(int pos, int len, bool signd, int scale, opnd_size_t size,
dr_opnd_flags_t flags, uint enc, OUT opnd_t *opnd)
{
ptr_int_t val = signd ? extract_int(enc, pos, len) : extract_uint(enc, pos, len);
*opnd =
opnd_add_flags(opnd_create_immed_int(val * ((ptr_int_t)1 << scale), size), flags);
return true;
}
static bool
encode_opnd_int(int pos, int len, bool signd, int scale, dr_opnd_flags_t flags,
opnd_t opnd, OUT uint *enc_out)
{
ptr_uint_t val;
if (!opnd_is_immed_int(opnd) || (opnd_get_flags(opnd) & flags) != flags)
return false;
val = opnd_get_immed_int(opnd);
if ((val & (((ptr_uint_t)1 << scale) - 1)) != 0)
return false;
if ((val + (signd ? ((ptr_uint_t)1 << (len + scale - 1)) : 0)) >> (len + scale) != 0)
return false;
*enc_out = (val >> scale & (((ptr_uint_t)1 << (len - 1)) * 2 - 1)) << pos;
return true;
}
/* imm_bf: used for bitfield immediate operands */
static bool
decode_opnd_imm_bf(int pos, uint enc, OUT opnd_t *opnd)
{
if (!TEST(1U << 31, enc) && extract_uint(enc, pos, 6) >= 32)
return false;
return decode_opnd_int(pos, 6, false, 0, OPSZ_6b, 0, enc, opnd);
}
static bool
encode_opnd_imm_bf(int pos, uint enc, opnd_t opnd, uint *enc_out)
{
if (!TEST(1U << 31, enc) && extract_uint(enc, pos, 6) >= 32)
return false;
return encode_opnd_int(pos, 6, false, 0, 0, opnd, enc_out);
}
/* mem0_scale: used for mem0, mem0p */
static inline bool
decode_opnd_mem0_scale(int scale, uint enc, OUT opnd_t *opnd)
{
*opnd = create_base_imm(enc, 0, 1 << scale);
return true;
}
static inline bool
encode_opnd_mem0_scale(int scale, opnd_t opnd, OUT uint *enc_out)
{
uint xn;
if (!is_base_imm(opnd, &xn) ||
opnd_get_size(opnd) != opnd_size_from_bytes(1 << scale) ||
opnd_get_disp(opnd) != 0)
return false;
*enc_out = xn << 5;
return true;
}
/* mem12_scale: used for mem12, mem12q, prf12 */
static inline bool
decode_opnd_mem12_scale(int scale, bool prfm, uint enc, OUT opnd_t *opnd)
{
*opnd =
create_base_imm(enc, extract_uint(enc, 10, 12) << scale, prfm ? 0 : 1 << scale);
return true;
}
static inline bool
encode_opnd_mem12_scale(int scale, bool prfm, opnd_t opnd, OUT uint *enc_out)
{
int disp;
uint xn;
if (!is_base_imm(opnd, &xn) ||
opnd_get_size(opnd) != (prfm ? OPSZ_0 : opnd_size_from_bytes(1 << scale)))
return false;
disp = opnd_get_disp(opnd);
if (disp < 0 || disp >> scale > 0xfff || disp >> scale << scale != disp)
return false;
*enc_out = xn << 5 | (uint)disp >> scale << 10;
return true;
}
/* mem7_postindex: used for mem7, mem7post */
static inline bool
decode_opnd_mem7_postindex(bool post, uint enc, OUT opnd_t *opnd)
{
int scale = mem7_scale(enc);
*opnd = create_base_imm(enc, post ? 0 : extract_int(enc, 15, 7) * (1 << scale),
2 << scale);
opnd->value.base_disp.pre_index = !post;
return true;
}
static inline bool
encode_opnd_mem7_postindex(bool post, uint enc, opnd_t opnd, OUT uint *enc_out)
{
int scale = mem7_scale(enc);
int disp;
uint xn;
if (!is_base_imm(opnd, &xn) ||
opnd_get_size(opnd) != opnd_size_from_bytes(2 << scale))
return false;
disp = opnd_get_disp(opnd);
if (disp == 0 && opnd.value.base_disp.pre_index == post)
return false;
if (post ? disp != 0
: ((uint)disp & ((1 << scale) - 1)) != 0 ||
(uint)disp + (0x40 << scale) >= (0x80 << scale))
return false;
*enc_out = xn << 5 | ((uint)disp >> scale & 0x7f) << 15;
return true;
}
/* mem9_bytes: used for mem9, mem9post, mem9q, mem9qpost, prf9 */
static inline bool
decode_opnd_mem9_bytes(int bytes, bool post, uint enc, OUT opnd_t *opnd)
{
*opnd = create_base_imm(enc, post ? 0 : extract_int(enc, 12, 9), bytes);
opnd->value.base_disp.pre_index = !post;
return true;
}
static inline bool
encode_opnd_mem9_bytes(int bytes, bool post, opnd_t opnd, OUT uint *enc_out)
{
int disp;
uint xn;
if (!is_base_imm(opnd, &xn) || opnd_get_size(opnd) != opnd_size_from_bytes(bytes))
return false;
disp = opnd_get_disp(opnd);
if (disp == 0 && opnd.value.base_disp.pre_index == post)
return false;
if (post ? (disp != 0) : (disp < -256 || disp > 255))
return false;
*enc_out = xn << 5 | ((uint)disp & 0x1ff) << 12;
return true;
}
/* memreg_size: used for memreg, memregq, prfreg */
static inline bool
decode_opnd_memreg_size(opnd_size_t size, uint enc, OUT opnd_t *opnd)
{
if (!TEST(1U << 14, enc))
return false;
*opnd = opnd_create_base_disp_aarch64(decode_reg(enc >> 5 & 31, true, true),
decode_reg(enc >> 16 & 31, true, false),
enc >> 13 & 7, TEST(1U << 12, enc), 0, 0, size);
return true;
}
static inline bool
encode_opnd_memreg_size(opnd_size_t size, opnd_t opnd, OUT uint *enc_out)
{
uint rn, rm, option;
bool xn, xm, scaled;
if (!opnd_is_base_disp(opnd) || opnd_get_size(opnd) != size ||
opnd_get_disp(opnd) != 0)
return false;
option = opnd_get_index_extend(opnd, &scaled, NULL);
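    /* Only extend options with bit 1 set (UXTW, UXTX/LSL, SXTW, SXTX) are
     * valid for register-offset addressing.
     */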
if (!TEST(2, option))
return false;
if (!encode_reg(&rn, &xn, opnd_get_base(opnd), true) || !xn ||
!encode_reg(&rm, &xm, opnd_get_index(opnd), false) || !xm)
return false;
*enc_out = rn << 5 | rm << 16 | option << 13 | (uint)scaled << 12;
return true;
}
/* q0p: used for q0p1, q0p2, q0p3 */
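/* These operands name the consecutive registers of a multi-register list;
 * the register number wraps modulo 32.
 */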
static bool
decode_opnd_q0p(int add, uint enc, OUT opnd_t *opnd)
{
*opnd = decode_vreg(4, (extract_uint(enc, 0, 5) + add) % 32);
return true;
}
static bool
encode_opnd_q0p(int add, opnd_t opnd, OUT uint *enc_out)
{
opnd_size_t size = OPSZ_NA;
uint r;
if (!encode_vreg(&size, &r, opnd) || size != OPSZ_16)
return false;
*enc_out = (r - add) % 32;
return true;
}
/* rn: used for many integer register operands where bit 31 specifies W or X */
static inline bool
decode_opnd_rn(bool is_sp, int pos, uint enc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(
decode_reg(extract_uint(enc, pos, 5), TEST(1U << 31, enc), is_sp));
return true;
}
static inline bool
encode_opnd_rn(bool is_sp, int pos, opnd_t opnd, OUT uint *enc_out)
{
uint num;
bool is_x;
if (!opnd_is_reg(opnd) || !encode_reg(&num, &is_x, opnd_get_reg(opnd), is_sp))
return false;
*enc_out = (uint)is_x << 31 | num << pos;
return true;
}
/* vector_reg: used for many FP/SIMD register operands */
static bool
decode_opnd_vector_reg(int pos, int scale, uint enc, OUT opnd_t *opnd)
{
*opnd = decode_vreg(scale, extract_uint(enc, pos, 5));
return true;
}
static bool
encode_opnd_vector_reg(int pos, int scale, opnd_t opnd, OUT uint *enc_out)
{
opnd_size_t size = OPSZ_NA;
uint r;
if (!encode_vreg(&size, &r, opnd) || size != opnd_size_from_bytes(1 << scale))
return false;
*enc_out = r << pos;
return true;
}
/* vtn: used for vt0, ..., vt3 */
static bool
decode_opnd_vtn(int add, uint enc, OUT opnd_t *opnd)
{
if (extract_uint(enc, 10, 2) == 3 && extract_uint(enc, 30, 1) == 0)
return false;
*opnd = opnd_create_reg((TEST(1U << 30, enc) ? DR_REG_Q0 : DR_REG_D0) +
((extract_uint(enc, 0, 5) + add) % 32));
return true;
}
static bool
encode_opnd_vtn(int add, uint enc, opnd_t opnd, OUT uint *enc_out)
{
    reg_id_t reg;
uint num;
bool q;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
q = (uint)(reg - DR_REG_Q0) < 32;
if (extract_uint(enc, 10, 2) == 3 && !q)
return false;
num = reg - (q ? DR_REG_Q0 : DR_REG_D0);
if (num >= 32)
return false;
*enc_out = (num - add) % 32 | (uint)q << 30;
return true;
}
/* wxn: used for many integer register operands with fixed size (W or X) */
static bool
decode_opnd_wxn(bool is_x, bool is_sp, int pos, uint enc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(decode_reg(enc >> pos & 31, is_x, is_sp));
return true;
}
static bool
encode_opnd_wxn(bool is_x, bool is_sp, int pos, opnd_t opnd, OUT uint *enc_out)
{
reg_id_t reg;
uint n;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
n = reg - (is_x ? DR_REG_X0 : DR_REG_W0);
if (n < 31) {
*enc_out = n << pos;
return true;
}
if (reg ==
(is_sp ? (is_x ? DR_REG_XSP : DR_REG_WSP) : (is_x ? DR_REG_XZR : DR_REG_WZR))) {
*enc_out = (uint)31 << pos;
return true;
}
return false;
}
/* wxnp: used for CASP, even/odd register pairs */
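/* CASP requires the first register of each pair to be even-numbered; 'plus'
 * selects the first (0) or second (1) register of the pair.
 */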
static bool
decode_opnd_wxnp(bool is_x, int plus, int pos, uint enc, OUT opnd_t *opnd)
{
if ((enc >> pos & 1) != 0)
return false;
*opnd = opnd_create_reg(decode_reg(((enc >> pos) + plus) & 31, is_x, false));
return true;
}
static bool
encode_opnd_wxnp(bool is_x, int plus, int pos, opnd_t opnd, OUT uint *enc_out)
{
reg_id_t reg;
uint n;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
n = reg - (is_x ? DR_REG_X0 : DR_REG_W0);
if (n < 31 && (n - plus) % 2 == 0) {
*enc_out = ((n - plus) & 31) << pos;
return true;
}
if (reg == (is_x ? DR_REG_XZR : DR_REG_WZR) && ((uint)31 - plus) % 2 == 0) {
*enc_out = (((uint)31 - plus) & 31) << pos;
return true;
}
return false;
}
static inline bool
decode_float_reg(uint n, uint type, reg_id_t *reg)
{
switch (type) {
case 3:
/* Half precision operands are only supported in Armv8.2+. */
*reg = DR_REG_H0 + n;
return true;
case 0: *reg = DR_REG_S0 + n; return true;
case 1: *reg = DR_REG_D0 + n; return true;
default: return false;
}
}
static inline bool
decode_opnd_float_reg(int pos, uint enc, OUT opnd_t *opnd)
{
reg_id_t reg;
    if (!decode_float_reg(extract_uint(enc, pos, 5), extract_uint(enc, 22, 2), &reg))
return false;
*opnd = opnd_create_reg(reg);
return true;
}
static inline bool
encode_opnd_float_reg(int pos, opnd_t opnd, OUT uint *enc_out)
{
uint num;
uint type;
opnd_size_t size = OPSZ_NA;
if (!encode_vreg(&size, &num, opnd))
return false;
switch (size) {
case OPSZ_2:
/* Half precision operands are only supported in Armv8.2+. */
type = 3;
break;
case OPSZ_4: type = 0; break;
case OPSZ_8: type = 1; break;
default: return false;
}
*enc_out = type << 22 | num << pos;
return true;
}
/* Used to encode an SVE predicate register (P register). */
static inline bool
encode_opnd_p(uint pos_start, uint max_reg_num, opnd_t opnd, OUT uint *enc_out)
{
uint num;
if (!opnd_is_reg(opnd))
return false;
num = opnd_get_reg(opnd) - DR_REG_P0;
if (num > max_reg_num)
return false;
*enc_out = num << pos_start;
return true;
}
/* Used to encode an SVE vector register (Z register). */
static inline bool
encode_opnd_z(uint pos_start, opnd_t opnd, OUT uint *enc_out)
{
uint num;
if (!opnd_is_reg(opnd))
return false;
num = opnd_get_reg(opnd) - DR_REG_Z0;
if (num >= 32)
return false;
*enc_out = num << pos_start;
return true;
}
/*******************************************************************************
* Pairs of functions for decoding and encoding each type of operand, as listed in
* "codec.txt". Try to keep these short: perhaps a tail call to a function in the
* previous section.
*/
/* impx30: implicit X30 operand, used by BLR */
static inline bool
decode_opnd_impx30(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(DR_REG_X30);
return true;
}
static inline bool
encode_opnd_impx30(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_reg(opnd) || opnd_get_reg(opnd) != DR_REG_X30)
return false;
*enc_out = 0;
return true;
}
/* lsl: constant LSL for ADD/MOV, no encoding bits */
static inline bool
decode_opnd_lsl(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint t = DR_SHIFT_LSL;
return decode_opnd_int(0, 2, false, 0, OPSZ_2b, DR_OPND_IS_SHIFT, t, opnd);
}
static inline bool
encode_opnd_lsl(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(0, 2, false, 0, DR_OPND_IS_SHIFT, opnd, &t) || t != DR_SHIFT_LSL)
return false;
*enc_out = 0;
return true;
}
/* h_sz: Operand size for half precision encoding of floating point vector
* instructions. We need to convert the generic size operand to the right
 * encoding bits. It only supports VECTOR_ELEM_WIDTH_HALF.
*/
static inline bool
decode_opnd_h_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_HALF, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_h_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
if (opnd_get_immed_int(opnd) == VECTOR_ELEM_WIDTH_HALF)
return true;
return false;
}
/* b_const_sz: Operand size for byte elements
*/
static inline bool
decode_opnd_b_const_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_BYTE, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_b_const_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
if (opnd_get_immed_int(opnd) == VECTOR_ELEM_WIDTH_BYTE)
return true;
return false;
}
/* s_const_sz: Operand size for single (32-bit) element
*/
static inline bool
decode_opnd_s_const_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_SINGLE, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_s_const_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
if (opnd_get_immed_int(opnd) == VECTOR_ELEM_WIDTH_SINGLE)
return true;
return false;
}
/* d_const_sz: Operand size for double elements
*/
static inline bool
decode_opnd_d_const_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_DOUBLE, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_d_const_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
if (opnd_get_immed_int(opnd) == VECTOR_ELEM_WIDTH_DOUBLE)
return true;
return false;
}
/* vindex_D1: implicit index, always 1 */
static inline bool
decode_opnd_vindex_D1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_immed_int(1, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_vindex_D1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
if (opnd_get_immed_int(opnd) == 1)
return true;
return false;
}
/* nzcv: flag bit specifier for conditional compare */
static inline bool
decode_opnd_nzcv(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(0, 4, false, 0, OPSZ_4b, 0, enc, opnd);
}
static inline bool
encode_opnd_nzcv(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(0, 4, false, 0, 0, opnd, enc_out);
}
/* w0: W register or WZR at bit position 0 */
static inline bool
decode_opnd_w0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 0, enc, opnd);
}
static inline bool
encode_opnd_w0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 0, opnd, enc_out);
}
/* w0p0: even-numbered W register or WZR at bit position 0 */
static inline bool
decode_opnd_w0p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 0, 0, enc, opnd);
}
static inline bool
encode_opnd_w0p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 0, 0, opnd, enc_out);
}
/* w0p1: even-numbered W register or WZR at bit position 0, add 1 */
static inline bool
decode_opnd_w0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 1, 0, enc, opnd);
}
static inline bool
encode_opnd_w0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 1, 0, opnd, enc_out);
}
/* x0: X register or XZR at bit position 0 */
static inline bool
decode_opnd_x0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 0, enc, opnd);
}
static inline bool
encode_opnd_x0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 0, opnd, enc_out);
}
/* memx0: memory operand with no offset used as memref for SYS */
static inline bool
decode_opnd_memx0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_base_disp(decode_reg(extract_uint(enc, 0, 5), true, false),
DR_REG_NULL, 0, 0, OPSZ_sys);
return true;
}
static inline bool
encode_opnd_memx0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint xn;
bool is_x;
/* Only a base address in X reg is valid */
if (!opnd_is_base_disp(opnd) || !encode_reg(&xn, &is_x, opnd_get_base(opnd), false) ||
!is_x || opnd_get_size(opnd) != OPSZ_sys || opnd_get_scale(opnd) != 0 ||
opnd_get_disp(opnd) != 0 || opnd_get_index(opnd) != DR_REG_NULL)
return false;
*enc_out = xn;
return true;
}
/* x0p0: even-numbered X register or XZR at bit position 0 */
static inline bool
decode_opnd_x0p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 0, 0, enc, opnd);
}
static inline bool
encode_opnd_x0p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 0, 0, opnd, enc_out);
}
/* x0p1: even-numbered X register or XZR at bit position 0, add 1 */
static inline bool
decode_opnd_x0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 1, 0, enc, opnd);
}
static inline bool
encode_opnd_x0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 1, 0, opnd, enc_out);
}
/* b0: B register at bit position 0 */
static inline bool
decode_opnd_b0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 0, enc, opnd);
}
static inline bool
encode_opnd_b0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 0, opnd, enc_out);
}
/* h0: H register at bit position 0 */
static inline bool
decode_opnd_h0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 1, enc, opnd);
}
static inline bool
encode_opnd_h0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 1, opnd, enc_out);
}
/* s0: S register at bit position 0 */
static inline bool
decode_opnd_s0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 2, enc, opnd);
}
static inline bool
encode_opnd_s0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 2, opnd, enc_out);
}
/* d0: D register at bit position 0 */
static inline bool
decode_opnd_d0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 3, enc, opnd);
}
static inline bool
encode_opnd_d0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 3, opnd, enc_out);
}
/* q0: Q register at bit position 0 */
static inline bool
decode_opnd_q0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 4, enc, opnd);
}
static inline bool
encode_opnd_q0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 4, opnd, enc_out);
}
/* z0: Z register at bit position 0. */
static inline bool
decode_opnd_z0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(DR_REG_Z0 + extract_uint(enc, 0, 5));
return true;
}
static inline bool
encode_opnd_z0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_z(0, opnd, enc_out);
}
/* q0p1: as q0 but add 1 mod 32 to reg number */
static inline bool
decode_opnd_q0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_q0p(1, enc, opnd);
}
static inline bool
encode_opnd_q0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_q0p(1, opnd, enc_out);
}
/* q0p2: as q0 but add 2 mod 32 to reg number */
static inline bool
decode_opnd_q0p2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_q0p(2, enc, opnd);
}
static inline bool
encode_opnd_q0p2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_q0p(2, opnd, enc_out);
}
/* q0p3: as q0 but add 3 mod 32 to reg number */
static inline bool
decode_opnd_q0p3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_q0p(3, enc, opnd);
}
static inline bool
encode_opnd_q0p3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_q0p(3, opnd, enc_out);
}
/* prfop: prefetch operation, such as PLDL1KEEP */
static inline bool
decode_opnd_prfop(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(0, 5, false, 0, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_prfop(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(0, 5, false, 0, 0, opnd, enc_out);
}
/* op2: 3-bit immediate from bits 5-7 */
static inline bool
decode_opnd_op2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(5, 3, false, 0, OPSZ_3b, 0, enc, opnd);
}
static inline bool
encode_opnd_op2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(5, 3, false, 0, 0, opnd, enc_out);
}
/* w5: W register or WZR at bit position 5 */
static inline bool
decode_opnd_w5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 5, enc, opnd);
}
static inline bool
encode_opnd_w5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 5, opnd, enc_out);
}
/* x5: X register or XZR at position 5 */
static inline bool
decode_opnd_x5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 5, enc, opnd);
}
static inline bool
encode_opnd_x5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 5, opnd, enc_out);
}
/* x5sp: X register or XSP at bit position 5 */
static inline bool
decode_opnd_x5sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, true, 5, enc, opnd);
}
static inline bool
encode_opnd_x5sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, true, 5, opnd, enc_out);
}
/* b5: B register at bit position 5 */
static inline bool
decode_opnd_b5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(5, 0, enc, opnd);
}
static inline bool
encode_opnd_b5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(5, 0, opnd, enc_out);
}
/* h5: H register at bit position 5 */
static inline bool
decode_opnd_h5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(5, 1, enc, opnd);
}
static inline bool
encode_opnd_h5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(5, 1, opnd, enc_out);
}
/* s5: S register at bit position 5 */
static inline bool
decode_opnd_s5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(5, 2, enc, opnd);
}
static inline bool
encode_opnd_s5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(5, 2, opnd, enc_out);
}
/* d5: D register at bit position 5 */
static inline bool
decode_opnd_d5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(5, 3, enc, opnd);
}
static inline bool
encode_opnd_d5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(5, 3, opnd, enc_out);
}
/* q5: Q register at bit position 5 */
static inline bool
decode_opnd_q5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(5, 4, enc, opnd);
}
static inline bool
encode_opnd_q5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(5, 4, opnd, enc_out);
}
/* z5: Z register at bit position 5. */
static inline bool
decode_opnd_z5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(DR_REG_Z0 + extract_uint(enc, 5, 5));
return true;
}
static inline bool
encode_opnd_z5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_z(5, opnd, enc_out);
}
/* mem9qpost: post-indexed mem9q, so offset is zero */
static inline bool
decode_opnd_mem9qpost(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(16, true, enc, opnd);
}
static inline bool
encode_opnd_mem9qpost(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(16, true, opnd, enc_out);
}
/* vmsz: B/H/S/D for load/store multiple structures */
static inline bool
decode_opnd_vmsz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(10, 2, false, 0, OPSZ_2b, 0, enc, opnd);
}
static inline bool
encode_opnd_vmsz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(10, 2, false, 0, 0, opnd, enc_out);
}
/* imm4: immediate operand for some system instructions */
static inline bool
decode_opnd_imm4(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(8, 4, false, 0, OPSZ_4b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm4(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(8, 4, false, 0, 0, opnd, enc_out);
}
#define CMODE_MSL_BIT 28
/* cmode4_s_sz_msl: Operand for 32 bit elements' shift amount (shifting ones) */
static inline bool
decode_opnd_cmode4_s_sz_msl(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
/* cmode size shift amounts
* 110x 32 8,16
* This is an MSL (Modified Shift Left). Unlike an LSL (Logical Shift
* Left), this left shift shifts ones instead of zeros into the low order
* bits.
*
* The element size and shift amount are stored as two 32 bit numbers in
* sz_shft. This is a workaround until issue i#4393 is addressed.
*/
const int cmode4 = extract_uint(enc, 12, 1);
const int size = 32;
const int shift = ((cmode4 == 0) ? 8 : 16) | (1U << CMODE_MSL_BIT);
uint64 sz_shft = ((uint64)size << 32) | shift;
*opnd = opnd_create_immed_int(sz_shft, OPSZ_8);
return true;
}
static inline bool
encode_opnd_cmode4_s_sz_msl(uint enc, int opcode, byte *pc, opnd_t opnd,
OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
int64 sz_shft = opnd_get_immed_int(opnd);
int shift = (int)(sz_shft & 0xffffffff);
if (!TEST(1U << CMODE_MSL_BIT, shift)) // MSL bit should be set
return false;
shift &= 0xff;
const int size = (int)(sz_shft >> 32);
if (size != 32)
return false;
int cmode4;
if (shift == 8)
cmode4 = 0;
else if (shift == 16)
cmode4 = 1;
else
return false;
opnd = opnd_create_immed_uint(cmode4, OPSZ_1b);
encode_opnd_int(12, 1, false, false, 0, opnd, enc_out);
return true;
}
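/* Illustrative example (values assumed, not from the manual): with cmode4 = 1
 * the decoder packs size 32 and shift 16 (tagged with the MSL marker bit) into
 * sz_shft = ((uint64)32 << 32) | (16 | (1U << CMODE_MSL_BIT))
 *         = 0x0000002010000010.
 */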
/* extam: extend amount, a left shift from 0 to 4 */
static inline bool
decode_opnd_extam(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (extract_uint(enc, 10, 3) > 4) /* shift amount must be <= 4 */
return false;
return decode_opnd_int(10, 3, false, 0, OPSZ_3b, 0, enc, opnd);
}
static inline bool
encode_opnd_extam(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(10, 3, false, 0, 0, opnd, &t) ||
extract_uint(t, 10, 3) > 4) /* shift amount must be <= 4 */
return false;
*enc_out = t;
return true;
}
/* cmode_h_sz: Operand for 16 bit elements' shift amount */
static inline bool
decode_opnd_cmode_h_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
/* cmode size amounts
* 10x0 16 0,8
*
* The element size and shift amount are stored as two 32 bit numbers in
* sz_shft. This is a workaround until issue i#4393 is addressed.
*/
const int cmode = extract_uint(enc, 13, 1);
int size = 16;
const int shift = (cmode == 0) ? 0 : 8;
const uint64 sz_shft = ((uint64)size << 32) | shift;
*opnd = opnd_create_immed_int(sz_shft, OPSZ_8);
return true;
}
static inline bool
encode_opnd_cmode_h_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
const int64 sz_shft = opnd_get_immed_int(opnd);
const int shift = (int)(sz_shft & 0xFF);
int size = (int)(sz_shft >> 32);
if (size != 16)
return false;
int cmode;
if (shift == 0)
cmode = 0;
else if (shift == 8)
cmode = 1;
else
return false;
opnd = opnd_create_immed_uint(cmode, OPSZ_1b);
encode_opnd_int(13, 1, false, false, 0, opnd, enc_out);
return true;
}
/* p10_low: P register at bit position 10; P0-P7 */
static inline bool
decode_opnd_p10_low(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(DR_REG_P0 + extract_uint(enc, 10, 3));
return true;
}
static inline bool
encode_opnd_p10_low(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_p(10, 7, opnd, enc_out);
}
/* cmode_s_sz: Operand for 32 bit elements' shift amount */
static inline bool
decode_opnd_cmode_s_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
/* cmode size amounts
* 0xx0 32 0,8,16,24
*
* The element size and shift amount are stored as two 32 bit numbers in
* sz_shft. This is a workaround until issue i#4393 is addressed.
*/
const int cmode = extract_uint(enc, 13, 2);
const int size = 32;
int shift;
switch (cmode) {
case 0: shift = 0; break;
case 1: shift = 8; break;
case 2: shift = 16; break;
case 3: shift = 24; break;
default: return false;
}
const uint64 sz_shft = ((uint64)size << 32) | shift;
*opnd = opnd_create_immed_int(sz_shft, OPSZ_8);
return true;
}
static inline bool
encode_opnd_cmode_s_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
const int64 sz_shft = opnd_get_immed_int(opnd);
const int shift = (int)(sz_shft & 0xffffffff);
if (TEST(1U << CMODE_MSL_BIT, shift)) // MSL bit should not be set as this is LSL
return false;
const int size = (int)(sz_shft >> 32);
if (size != 32)
return false;
int cmode;
switch (shift) {
case 0: cmode = 0; break;
case 8: cmode = 1; break;
case 16: cmode = 2; break;
case 24: cmode = 3; break;
default: return false;
}
opnd = opnd_create_immed_uint(cmode, OPSZ_2b);
encode_opnd_int(13, 2, false, false, 0, opnd, enc_out);
return true;
}
/* len: imm2 at bits 13 & 14 */
static inline bool
decode_opnd_len(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(13, 2, false, 0, OPSZ_2b, 0, enc, opnd);
}
static inline bool
encode_opnd_len(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(13, 2, false, 0, 0, opnd, enc_out);
}
/* imm4 encoded in bits 11-14 */
static inline bool
decode_opnd_imm4idx(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint value = extract_uint(enc, 11, 4);
*opnd = opnd_create_immed_uint(value, OPSZ_4b);
return true;
}
static inline bool
encode_opnd_imm4idx(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
return encode_opnd_int(11, 4, false, 0, 0, opnd, enc_out);
}
/* w10: W register or WZR at bit position 10 */
static inline bool
decode_opnd_w10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 10, enc, opnd);
}
static inline bool
encode_opnd_w10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 10, opnd, enc_out);
}
/* x10: X register or XZR at bit position 10 */
static inline bool
decode_opnd_x10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 10, enc, opnd);
}
static inline bool
encode_opnd_x10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 10, opnd, enc_out);
}
/* s10: S register at bit position 10 */
static inline bool
decode_opnd_s10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(10, 2, enc, opnd);
}
static inline bool
encode_opnd_s10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(10, 2, opnd, enc_out);
}
/* d10: D register at bit position 10 */
static inline bool
decode_opnd_d10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(10, 3, enc, opnd);
}
static inline bool
encode_opnd_d10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(10, 3, opnd, enc_out);
}
/* q10: Q register at bit position 10 */
static inline bool
decode_opnd_q10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(10, 4, enc, opnd);
}
static inline bool
encode_opnd_q10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(10, 4, opnd, enc_out);
}
/* cmode4_b_sz: Operand for byte elements' shift amount */
static inline bool
decode_opnd_cmode4_b_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
/* cmode size shift amount
* 1110 8 0
*
* The element size and shift amount are stored as two 32 bit numbers in
* sz_shft. This is a workaround until issue i#4393 is addressed.
*/
if ((enc & 0xf000) != 0xe000)
return false;
const int size = 8;
const uint64 sz_shft = (uint64)size << 32;
*opnd = opnd_create_immed_int(sz_shft, OPSZ_8);
return true;
}
static inline bool
encode_opnd_cmode4_b_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
const int size = 8;
if (opnd_is_immed_int(opnd) && opnd_get_immed_int(opnd) == ((uint64)size << 32))
return true;
return false;
}
/* ext: extend type, dr_extend_type_t */
static inline bool
decode_opnd_ext(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(13, 3, false, 0, OPSZ_3b, DR_OPND_IS_EXTEND, enc, opnd);
}
static inline bool
encode_opnd_ext(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(13, 3, false, 0, DR_OPND_IS_EXTEND, opnd, enc_out);
}
/* crn: 4-bit immediate from bits 12-15 */
static inline bool
decode_opnd_crn(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(12, 4, false, 0, OPSZ_4b, 0, enc, opnd);
}
static inline bool
encode_opnd_crn(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(12, 4, false, 0, 0, opnd, enc_out);
}
/* cond: condition operand for conditional compare */
static inline bool
decode_opnd_cond(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(12, 4, false, 0, OPSZ_4b, DR_OPND_IS_CONDITION, enc, opnd);
}
static inline bool
encode_opnd_cond(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(12, 4, false, 0, 0, opnd, enc_out);
}
/* scale: The scalar encoding of #fbits operand. This is the number of bits
* after the decimal point for fixed-point values.
*/
static inline bool
decode_opnd_scale(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint scale = extract_uint(enc, 10, 6);
*opnd = opnd_create_immed_int(64 - scale, OPSZ_6b);
return true;
}
static inline bool
encode_opnd_scale(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
ptr_int_t fbits;
if (!opnd_is_immed_int(opnd))
return false;
fbits = opnd_get_immed_int(opnd);
if (fbits < 1 || fbits > 64)
return false;
*enc_out = (64 - fbits) << 10; /* 'scale' bitfield in encoding */
return true;
}
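/* Worked example (illustrative): for #fbits = 4 the encoder writes
 * scale = 64 - 4 = 60 into bits 10-15, and the decoder recovers
 * 64 - 60 = 4.
 */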
static inline bool
decode_opnd_imm16_0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint value = extract_uint(enc, 0, 16);
*opnd = opnd_create_immed_int(value, OPSZ_2);
return true;
}
static inline bool
encode_opnd_imm16_0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint value;
if (!opnd_is_immed_int(opnd))
return false;
value = opnd_get_immed_int(opnd);
opnd = opnd_create_immed_uint(value, OPSZ_2);
uint enc_value;
encode_opnd_int(0, 16, false, false, 0, opnd, &enc_value);
*enc_out = enc_value;
return true;
}
/* op1: 3-bit immediate from bits 16-18 */
static inline bool
decode_opnd_op1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(16, 3, false, 0, OPSZ_3b, 0, enc, opnd);
}
static inline bool
encode_opnd_op1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(16, 3, false, 0, 0, opnd, enc_out);
}
/* fpimm8: immediate operand for SIMD fmov */
static inline bool
decode_opnd_fpimm8(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
/* See Arm Architecture Reference Manual
*
* Immediate is encoded as 8 bits. Bits 5->9 and 16->18. LSB is bit 5:
* imm8 = a:b:c:d:e:f:g:h (LSB)
*
* Half-precision (v8.2)
* --------------
*
* imm16 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,2):imm8<5:0>:Zeros(6);
* a:~b:bb:cdefgh:000000
*
* datasize = if Q == '1' then 128 else 64;
* imm = Replicate(imm16, datasize DIV 16);
* = imm16:imm16:imm16:imm16 (Q=0 -> 64)
* = imm16:imm16:imm16:imm16:imm16:imm16:imm16:imm16 (Q=1 -> 128)
*
* Single-precision (TODO)
* ----------------
* Assume cmode = 1111 and op = 0
*
* imm32 = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19);
* a:~b:bbbbb:cdefgh:0000000000000000000
*
* imm64 = Replicate(imm32, 2);
* = a:~b:bbbbb:cdefgh:0000000000000000000 a:~b:bbbbb:cdefgh:0000000000000000000
*
* datasize = if Q == '1' then 128 else 64;
* imm = Replicate(imm64, datasize DIV 64);
* = imm64 (Q=0)
* = imm64:imm64 (Q=1)
*/
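/* Worked example (illustrative): imm8 = 0x70 (abc = 011, defgh = 10000)
 * gives imm16 = 0x3c00 (half-precision 1.0) and imm32 = 0x3f800000
 * (single-precision 1.0f).
 */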
union {
#ifdef HAVE_HALF_FLOAT
__fp16 f;
uint16_t i;
#else
/* For platforms on which 16 bit (half-precision) FP is not yet available. */
float f;
uint32_t i;
#endif
} fpv;
int abc = extract_uint(enc, 16, 3);
int defgh = extract_uint(enc, 5, 5);
uint a = (abc & 0x4);
uint b = (abc & 0x2);
uint not_b = b == 0 ? 1 : 0;
#ifdef HAVE_HALF_FLOAT
uint bb = ((b == 0) ? 0 : 0x3);
#else
uint bbbbb = ((b == 0) ? 0 : 0x1f);
#endif
uint cdefgh = ((abc & 0x1) << 5) | (defgh & 0x1f);
#ifdef HAVE_HALF_FLOAT
uint16_t imm16 = (a << 13) | (not_b << 14) | (bb << 12) | (cdefgh << 6);
fpv.i = imm16;
#else
uint32_t imm32 = (a << 29) | (not_b << 30) | (bbbbb << 25) | (cdefgh << 19);
fpv.i = imm32;
#endif
*opnd = opnd_create_immed_float(fpv.f);
return true;
}
static inline bool
encode_opnd_fpimm8(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
/* Based on the IEEE 754-2008 standard but with Arm-specific details that
* are left open by the standard. See Arm Architecture Reference Manual.
*
* Half-precision example
* __ ________
* S/exp\/fraction\
* _
* abbbcdefgh000000
* 0011110000000000 = 1.0
* _
* abbb cdef gh00 0000
* 0x8 0 0 0 a
* 0x1 0 0 0 b
* 0x0 8 0 0 c
* 0x0 7 c 0 defgh
*/
union {
#ifdef HAVE_HALF_FLOAT
__fp16 f;
uint16_t i;
#else
/* For platforms on which 16 bit (half-precision) FP is not yet available. */
float f;
uint32_t i;
#endif
} fpv;
if (!opnd_is_immed_float(opnd))
return false;
fpv.f = opnd_get_immed_float(opnd);
#ifdef HAVE_HALF_FLOAT
uint16_t imm = fpv.i;
uint a = (imm & 0x8000);
uint b = (imm & 0x1000);
uint c = (imm & 0x800);
uint defgh = (imm & 0x7c0);
/* 3332 2222 2222 1111 1111 11
* 1098 7654 3210 9876 5432 1098 7654 3210
* ---- ---- ---- -abc ---- --de fgh- ---- immediate encoding
* 0x8000 |<-3| | ||
* 0x1000 |<-5--| ||
* 0x800 |<--5--||
* 0x7c0 |>
*/
*enc_out = (a << 3) | (b << 5) | (c << 5) | (defgh >> 1);
#else
/* 3332 2222 2222 1111 1111 11
* 1098 7654 3210 9876 5432 1098 7654 3210
* _
* abbb bbbc defg h000 0000 0000 0000 0000
*/
uint32_t imm = fpv.i;
uint a = (imm & 0x80000000);
uint b = (imm & 0x10000000);
uint c = (imm & 0x1000000);
uint defgh = (imm & 0xf80000);
*enc_out = (a >> 13) | (b >> 11) | (c >> 8) | (defgh >> 14);
#endif
return true;
}
/* imm8: an 8 bit uint stitched together from 2 parts of bits 16-18 and 5-9*/
static inline bool
decode_opnd_imm8(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int value_0 = extract_uint(enc, 16, 3);
int value_1 = extract_uint(enc, 5, 5);
int value = (value_0 << 5) | value_1;
*opnd = opnd_create_immed_uint(value, OPSZ_1);
return true;
}
static inline bool
encode_opnd_imm8(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
uint eight_bits = opnd_get_immed_int(opnd);
uint enc_top = 0;
opnd = opnd_create_immed_uint((eight_bits >> 5) & 0b111, OPSZ_3b);
encode_opnd_int(16, 3, false, false, 0, opnd, &enc_top);
uint enc_bottom = 0;
opnd = opnd_create_immed_uint(eight_bits & 0b11111, OPSZ_5b);
encode_opnd_int(5, 5, false, false, 0, opnd, &enc_bottom);
*enc_out = enc_top | enc_bottom;
return true;
}
/* exp_imm8 Encode and decode functions for the expanded imm format
The expanded imm format takes the bits from 16-18 and 5-9 and expands
them to a 64bit int.
It does this by taking each bit in turn and repeating it 8 times so,
abcdefgh
becomes
aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
*/
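/* Worked example (illustrative): imm8 = 0b10100101 expands to
 * 0xff00ff0000ff00ff, since each source bit is replicated into one byte
 * (bit 7 fills the most significant byte).
 */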
static inline bool
decode_opnd_exp_imm8(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
const uint repeats = 8;
uint upper_bits = extract_uint(enc, 16, 3);
uint lower_bits = extract_uint(enc, 5, 5);
uint bit_value = (upper_bits << 5) | lower_bits;
uint64 value = 0;
for (uint i = 0; i < repeats; i++) {
uint64 bit = (bit_value & (1 << i)) >> i;
if (bit == 1) /* bits are already 0, so only set bits need work */
for (uint j = 0; j < repeats; j++)
value |= bit << (i * repeats + j);
}
*opnd = opnd_create_immed_uint(value, OPSZ_8);
return true;
}
static inline bool
encode_opnd_exp_imm8(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
uint64 value = opnd_get_immed_int(opnd);
const uint first_top_bit = 5;
const uint num_top_bits = 3;
const uint first_bottom_bit = 0;
const uint num_bottom_bits = 5;
/*
The code below recompresses the repeated bits by selecting the first
bit of each group, &(1 << (i * 8)), and then shifting it back to its
original position, (i * 7 + offset).
*/
uint top_bits = 0;
uint enc_top = 0;
for (uint i = first_top_bit; i < first_top_bit + num_top_bits; i++)
top_bits |= (value & (uint64)1 << (i * 8)) >> (i * 7 + first_top_bit);
opnd = opnd_create_immed_uint(top_bits, OPSZ_3b);
encode_opnd_int(16, num_top_bits, false, false, 0, opnd, &enc_top);
uint bottom_bits = 0;
uint enc_bottom = 0;
for (uint i = first_bottom_bit; i < first_bottom_bit + num_bottom_bits; i++)
bottom_bits |= (value & (uint64)1 << (i * 8)) >> (i * 7 + first_bottom_bit);
opnd = opnd_create_immed_uint(bottom_bits, OPSZ_5b);
encode_opnd_int(5, num_bottom_bits, false, false, 0, opnd, &enc_bottom);
*enc_out = enc_top | enc_bottom;
return true;
}
/* sysops: immediate operand for SYS instruction which specifies SYS operations */
static inline bool
decode_opnd_sysops(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(5, 14, false, 0, OPSZ_2, 0, enc, opnd);
}
static inline bool
encode_opnd_sysops(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(5, 14, false, 0, 0, opnd, enc_out);
}
/* sysreg: system register, operand of MRS/MSR */
static inline bool
decode_opnd_sysreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = decode_sysreg(extract_uint(enc, 5, 15));
return true;
}
static inline bool
encode_opnd_sysreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_sysreg(&t, opnd))
return false;
*enc_out = t << 5;
return true;
}
/* Helper function returning the index of the least significant
set bit of a 5 bit immediate, e.g.
00001 = 0, 00010 = 1, 00100 = 2 ...
*/
static inline int
get_imm5_offset(int val)
{
for (int i = 0; i < 4; i++) {
if ((1 << i) & val) {
return i;
}
}
return -1;
}
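/* For example (illustrative), get_imm5_offset(0b01000) returns 3, which
 * wx5_imm5 below interprets as an X (doubleword) register; a value with
 * none of bits 0-3 set returns -1.
 */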
/* wx5_imm5: bits 5-9 are a GPR whose width is determined by the imm5
field in bits 16-20
*/
static inline bool
decode_opnd_wx5_imm5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint imm5 = extract_int(enc, 16, 5);
bool is_x_register = get_imm5_offset(imm5) == 3;
*opnd = opnd_create_reg(decode_reg(extract_uint(enc, 5, 5), is_x_register, false));
return true;
}
static inline bool
encode_opnd_wx5_imm5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_reg(opnd))
ASSERT(false);
uint num;
bool is_x;
if (!encode_reg(&num, &is_x, opnd_get_reg(opnd), false))
ASSERT(false);
*enc_out = num << 5;
return true;
}
/* imm5: immediate operand for conditional compare (immediate) */
static inline bool
decode_opnd_imm5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(16, 5, false, 0, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(16, 5, false, 0, 0, opnd, enc_out);
}
/* w16: W register or WZR at bit position 16 */
static inline bool
decode_opnd_w16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 16, enc, opnd);
}
static inline bool
encode_opnd_w16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 16, opnd, enc_out);
}
/* w16p0: even-numbered W register or WZR at bit position 16 */
static inline bool
decode_opnd_w16p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 0, 16, enc, opnd);
}
static inline bool
encode_opnd_w16p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 0, 16, opnd, enc_out);
}
/* w16p1: even-numbered W register or WZR at bit position 16, add 1 */
static inline bool
decode_opnd_w16p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 1, 16, enc, opnd);
}
static inline bool
encode_opnd_w16p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 1, 16, opnd, enc_out);
}
/* x16: X register or XZR at bit position 16 */
static inline bool
decode_opnd_x16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 16, enc, opnd);
}
static inline bool
encode_opnd_x16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 16, opnd, enc_out);
}
/* x16p0: even-numbered X register or XZR at bit position 16 */
static inline bool
decode_opnd_x16p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 0, 16, enc, opnd);
}
static inline bool
encode_opnd_x16p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 0, 16, opnd, enc_out);
}
/* x16p1: even-numbered X register or XZR at bit position 16, add 1 */
static inline bool
decode_opnd_x16p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 1, 16, enc, opnd);
}
static inline bool
encode_opnd_x16p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 1, 16, opnd, enc_out);
}
/* d16: D register at bit position 16 */
static inline bool
decode_opnd_d16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(16, 3, enc, opnd);
}
static inline bool
encode_opnd_d16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(16, 3, opnd, enc_out);
}
/* q16: Q register at bit position 16 */
static inline bool
decode_opnd_q16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(16, 4, enc, opnd);
}
static inline bool
encode_opnd_q16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(16, 4, opnd, enc_out);
}
/* z16: Z register at bit position 16. */
static inline bool
decode_opnd_z16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(DR_REG_Z0 + extract_uint(enc, 16, 5));
return true;
}
static inline bool
encode_opnd_z16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_z(16, opnd, enc_out);
}
/* b16: B register at bit position 16. */
static inline bool
decode_opnd_b16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(16, 0, enc, opnd);
}
static inline bool
encode_opnd_b16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(16, 0, opnd, enc_out);
}
/* h16: H register at bit position 16. */
static inline bool
decode_opnd_h16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(16, 1, enc, opnd);
}
static inline bool
encode_opnd_h16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(16, 1, opnd, enc_out);
}
/* s16: S register at bit position 16. */
static inline bool
decode_opnd_s16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(16, 2, enc, opnd);
}
static inline bool
encode_opnd_s16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(16, 2, opnd, enc_out);
}
/* mem9off: just the 9-bit offset from mem9 */
static inline bool
decode_opnd_mem9off(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(12, 9, true, 0, OPSZ_PTR, 0, enc, opnd);
}
static inline bool
encode_opnd_mem9off(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(12, 9, true, 0, 0, opnd, enc_out);
}
/* mem9q: memory operand with 9-bit offset; size is 16 bytes */
static inline bool
decode_opnd_mem9q(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(16, false, enc, opnd);
}
static inline bool
encode_opnd_mem9q(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(16, false, opnd, enc_out);
}
/* prf9: prefetch variant of mem9 */
static inline bool
decode_opnd_prf9(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(0, false, enc, opnd);
}
static inline bool
encode_opnd_prf9(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(0, false, opnd, enc_out);
}
/* memregq: memory operand with register offset; size is 16 bytes */
static inline bool
decode_opnd_memregq(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_memreg_size(OPSZ_16, enc, opnd);
}
static inline bool
encode_opnd_memregq(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_memreg_size(OPSZ_16, opnd, enc_out);
}
/* prfreg: prefetch variant of memreg */
static inline bool
decode_opnd_prfreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_memreg_size(OPSZ_0, enc, opnd);
}
static inline bool
encode_opnd_prfreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_memreg_size(OPSZ_0, opnd, enc_out);
}
/* imm16: 16-bit immediate operand of MOVK/MOVN/MOVZ/SVC */
static inline bool
decode_opnd_imm16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(5, 16, false, 0, OPSZ_12b, 0, enc, opnd);
}
static bool
encode_opnd_instr(int bit_pos, opnd_t opnd, byte *start_pc, instr_t *containing_instr,
OUT uint *enc_out)
{
if (!opnd_is_instr(opnd)) {
return false;
}
ptr_uint_t val =
((ptr_uint_t)instr_get_note(opnd_get_instr(opnd)) -
(ptr_uint_t)instr_get_note(containing_instr) + (ptr_uint_t)start_pc) >>
opnd_get_shift(opnd);
uint bits = opnd_size_in_bits(opnd_get_size(opnd));
// We expect truncation; instrlist_insert_mov_instr_addr splits the instr's
// encoded address into INSTR_kind operands in multiple mov instructions in the
// ilist, each representing a 2-byte portion of the complete address.
val &= ((1 << bits) - 1);
ASSERT((*enc_out & (val << bit_pos)) == 0);
*enc_out |= (val << bit_pos);
return true;
}
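/* Illustrative sketch (assumed values): for a 16-bit INSTR operand with
 * shift 0, a target note 0x10 ahead of the containing instruction and
 * start_pc 0x71000000 give val = 0x71000010 & 0xffff = 0x0010, which is
 * then OR-ed into the immediate field at bit_pos.
 */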
static inline bool
encode_opnd_imm16(uint enc, int opcode, byte *start_pc, opnd_t opnd,
instr_t *containing_instr, OUT uint *enc_out)
{
if (opnd_is_immed_int(opnd))
return encode_opnd_int(5, 16, false, 0, 0, opnd, enc_out);
else if (opnd_is_instr(opnd))
return encode_opnd_instr(5, opnd, start_pc, containing_instr, enc_out);
ASSERT_NOT_REACHED();
return false;
}
/* memvr: memory operand for SIMD load structure and replicate */
static inline bool
decode_opnd_memvr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
int regcount;
uint bytes, rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0)
return false;
bytes = opnd_size_in_bytes(opnd_get_size(opnd));
regcount = memvr_regcount(enc);
if (bytes % regcount != 0)
return false;
bytes /= regcount;
if (bytes < 1 || bytes > 8 || (bytes & (bytes - 1)) != 0 ||
opnd_size_from_bytes(bytes * regcount) != opnd_get_size(opnd))
return false;
*enc_out = (rn << 5 | (bytes == 1 ? 0 : bytes == 2 ? 1 : bytes == 4 ? 2 : 3) << 10);
return true;
}
/* memvs: memory operand for SIMD load/store single structure */
static inline bool
decode_opnd_memvs(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = memvs_size(enc);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvs(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0)
return false;
if (opnd_get_size(opnd) != opnd_size_from_bytes(memvs_size(enc)))
return false;
*enc_out = rn << 5;
return true;
}
/* x16immvr: immediate operand for SIMD load structure and replicate (post-indexed) */
static inline bool
decode_opnd_x16immvr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int num = extract_uint(enc, 16, 5);
if (num < 31)
*opnd = opnd_create_reg(DR_REG_X0 + num);
else {
int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2);
*opnd = opnd_create_immed_int(bytes, OPSZ_1);
}
return true;
}
static inline bool
encode_opnd_x16immvr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_is_reg(opnd)) {
uint num = opnd_get_reg(opnd) - DR_REG_X0;
if (num == 31)
return false;
*enc_out = num << 16;
return true;
} else if (opnd_is_immed_int(opnd)) {
ptr_int_t bytes = opnd_get_immed_int(opnd);
if (bytes != memvr_regcount(enc) << extract_uint(enc, 10, 2))
return false;
*enc_out = 31U << 16;
return true;
}
return false;
}
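/* Note: register number 31 in this field means "post-index by the transfer
 * size", which is why it decodes to an immediate rather than SP.
 */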
/* x16immvs: immediate operand for SIMD load/store single structure (post-indexed) */
static inline bool
decode_opnd_x16immvs(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int num = extract_uint(enc, 16, 5);
if (num < 31)
*opnd = opnd_create_reg(DR_REG_X0 + num);
else {
int bytes = memvs_size(enc);
*opnd = opnd_create_immed_int(bytes, OPSZ_1);
}
return true;
}
static inline bool
encode_opnd_x16immvs(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_is_reg(opnd)) {
uint num = opnd_get_reg(opnd) - DR_REG_X0;
if (num == 31)
return false;
*enc_out = num << 16;
return true;
} else if (opnd_is_immed_int(opnd)) {
ptr_int_t bytes = opnd_get_immed_int(opnd);
if (bytes != memvs_size(enc))
return false;
*enc_out = 31U << 16;
return true;
}
return false;
}
/* vindex_H: Index for vector with half elements (0-7). */
static inline bool
decode_opnd_vindex_H(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
/* Example encoding:
* FMLA <Vd>.<T>, <Vn>.<T>, <Vm>.H[<index>]
* 3322222222221111111111
* 10987654321098765432109876543210
* 0Q00111100LMRm--0001H0Rn---Rd---
*/
int H = 11;
int L = 21;
int M = 20;
// index=H:L:M
uint bits = (enc >> H & 1) << 2 | (enc >> L & 1) << 1 | (enc >> M & 1);
*opnd = opnd_create_immed_int(bits, OPSZ_3b);
return true;
}
static inline bool
encode_opnd_vindex_H(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
/* Example encoding:
* FMLA <Vd>.<T>, <Vn>.<T>, <Vm>.H[<index>]
* 3322222222221111111111
* 10987654321098765432109876543210
* 0Q00111100LMRm--0001H0Rn---Rd---
*/
int H = 11;
int L = 21;
int M = 20;
ptr_int_t val;
if (!opnd_is_immed_int(opnd))
return false;
val = opnd_get_immed_int(opnd);
if (val < 0 || val >= 8)
return false;
// index=H:L:M
*enc_out = (val >> 2 & 1) << H | (val >> 1 & 1) << L | (val & 1) << M;
return true;
}
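/* Worked example (illustrative): index 5 = 0b101 encodes as H = 1, L = 0,
 * M = 1, i.e. enc_out = (1 << 11) | (0 << 21) | (1 << 20).
 */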
/* imm12: 12-bit immediate operand of ADD/SUB */
static inline bool
decode_opnd_imm12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(10, 12, false, 0, OPSZ_12b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(10, 12, false, 0, 0, opnd, enc_out);
}
/* mem12q: memory operand with 12-bit offset; size is 16 bytes */
static inline bool
decode_opnd_mem12q(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem12_scale(4, false, enc, opnd);
}
static inline bool
encode_opnd_mem12q(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem12_scale(4, false, opnd, enc_out);
}
/* prf12: prefetch variant of mem12 */
static inline bool
decode_opnd_prf12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem12_scale(3, true, enc, opnd);
}
static inline bool
encode_opnd_prf12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem12_scale(3, true, opnd, enc_out);
}
/* hsd_immh_sz: The element size of a vector mediated by immh with possible values h, s
* and d
*/
static inline bool
decode_opnd_hsd_immh_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int highest_bit;
if (!highest_bit_set(enc, 19, 4, &highest_bit))
return false;
switch (highest_bit) {
case 0: *opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_HALF, OPSZ_2b); break;
case 1: *opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_SINGLE, OPSZ_2b); break;
case 2: *opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_DOUBLE, OPSZ_2b); break;
default: return false;
}
return true;
}
static inline bool
encode_opnd_hsd_immh_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return true;
}
/* bhsd_immh_sz: The element size of a vector mediated by immh with possible values b, h,
* s and d
*/
static inline bool
decode_opnd_bhsd_immh_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int highest_bit;
if (!highest_bit_set(enc, 19, 4, &highest_bit))
return false;
switch (highest_bit) {
case BYTE_REG: *opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_BYTE, OPSZ_2b); break;
case HALF_REG: *opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_HALF, OPSZ_2b); break;
case SINGLE_REG:
*opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_SINGLE, OPSZ_2b);
break;
case DOUBLE_REG:
*opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_DOUBLE, OPSZ_2b);
break;
default: return false;
}
return true;
}
static inline bool
encode_opnd_bhsd_immh_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return true;
}
static inline bool
decode_hsd_immh_regx(int rpos, uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int offset;
if (!highest_bit_set(enc, 19, 4, &offset))
return false;
/* highest_bit counts from 0 for the half-width case, so add 1 to align it
with the aarch64_reg_offset values (HALF_REG..DOUBLE_REG). */
offset += 1;
if (offset < HALF_REG || offset > DOUBLE_REG)
return false;
return decode_opnd_vector_reg(rpos, offset, enc, opnd);
}
static inline bool
encode_hsd_immh_regx(int rpos, uint enc, int opcode, byte *pc, opnd_t opnd,
OUT uint *enc_out)
{
if (!opnd_is_reg(opnd))
return false;
reg_t reg = opnd_get_reg(opnd);
aarch64_reg_offset offset = get_reg_offset(reg);
if (offset == BYTE_REG || offset > DOUBLE_REG)
return false;
return encode_opnd_vector_reg(rpos, offset, opnd, enc_out);
}
static inline bool
decode_bhsd_immh_regx(int rpos, uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int highest_bit;
if (!highest_bit_set(enc, 19, 4, &highest_bit))
return false;
if (highest_bit < 0 || highest_bit > 3)
return false;
return decode_opnd_vector_reg(rpos, highest_bit, enc, opnd);
}
static inline bool
encode_bhsd_immh_regx(int rpos, uint enc, int opcode, byte *pc, opnd_t opnd,
OUT uint *enc_out)
{
if (!opnd_is_reg(opnd))
return false;
reg_t reg = opnd_get_reg(opnd);
aarch64_reg_offset offset = get_reg_offset(reg);
if (offset > DOUBLE_REG)
return false;
return encode_opnd_vector_reg(rpos, offset, opnd, enc_out);
}
static inline bool
decode_opnd_hsd_immh_reg0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_hsd_immh_regx(0, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_hsd_immh_reg0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_hsd_immh_regx(0, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_opnd_bhsd_immh_reg0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_bhsd_immh_regx(0, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_bhsd_immh_reg0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_bhsd_immh_regx(0, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_opnd_hsd_immh_reg5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_hsd_immh_regx(5, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_hsd_immh_reg5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_hsd_immh_regx(5, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_opnd_bhsd_immh_reg5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_bhsd_immh_regx(5, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_bhsd_immh_reg5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_bhsd_immh_regx(5, enc, opcode, pc, opnd, enc_out);
}
/* vindex_SD: Index for vector with single or double elements. */
static inline bool
decode_opnd_vindex_SD(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
/* Example encoding:
* FMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>]
* 3322222222221111111111
* 10987654321098765432109876543210
* 0Q0011111sLMRm--0001H0Rn---Rd---
* z
*/
int sz = 22;
int H = 11;
int L = 21;
uint bits;
if ((enc >> sz & 1) == 0) { // Single
bits = (enc >> H & 1) << 1 | (enc >> L & 1); // index=H:L
} else { // Double
if ((enc >> L & 1) != 0) {
return false;
}
bits = enc >> H & 1; // index=H
}
*opnd = opnd_create_immed_int(bits, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_vindex_SD(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
/* Example encoding:
* FMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>]
* 3322222222221111111111
* 10987654321098765432109876543210
* 0Q0011111sLMRm--0001H0Rn---Rd---
* z
*/
int sz = 22;
int H = 11;
int L = 21;
ptr_int_t val;
if (!opnd_is_immed_int(opnd))
return false;
val = opnd_get_immed_int(opnd);
if ((enc >> sz & 1) == 0) { // Single
if (val < 0 || val >= 4)
return false;
*enc_out = (val & 1) << L | (val >> 1 & 1) << H; // index=H:L
} else { // Double
if (val < 0 || val >= 2)
return false;
*enc_out = (val & 1) << H; // index=H
}
return true;
}
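/* Worked example (illustrative): for single elements (sz = 0), index 3
 * encodes as H:L = 1:1; for double elements, index 1 encodes as H = 1 and
 * the L bit must be 0.
 */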
/* imm12sh: shift amount for 12-bit immediate of ADD/SUB, 0 or 16 */
static inline bool
decode_opnd_imm12sh(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(22, 1, false, 4, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm12sh(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(22, 1, false, 4, 0, opnd, enc_out);
}
/* sd_sz: Operand size for single and double precision encoding of floating point
* vector instructions. We need to convert the generic size operand to the right
* encoding bits. It only supports VECTOR_ELEM_WIDTH_SINGLE and VECTOR_ELEM_WIDTH_DOUBLE.
*/
static inline bool
decode_opnd_sd_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (((enc >> 22) & 1) == 0) {
*opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_SINGLE, OPSZ_2b);
return true;
}
if (((enc >> 22) & 1) == 1) {
*opnd = opnd_create_immed_int(VECTOR_ELEM_WIDTH_DOUBLE, OPSZ_2b);
return true;
}
return false;
}
static inline bool
encode_opnd_sd_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
if (opnd_get_immed_int(opnd) == VECTOR_ELEM_WIDTH_SINGLE) {
*enc_out = 0;
return true;
}
if (opnd_get_immed_int(opnd) == VECTOR_ELEM_WIDTH_DOUBLE) {
*enc_out = 1 << 22;
return true;
}
return false;
}
/* dq5_sz: D/Q register at bit position 5; bit 22 selects Q reg */
static inline bool
decode_opnd_dq5_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(0, 5, 22, enc, opnd);
}
static inline bool
encode_opnd_dq5_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(0, 5, 22, opnd, enc_out);
}
static inline bool
immhb_shf_decode(uint enc, int opcode, byte *pc, OUT opnd_t *opnd, uint min_shift)
{
int highest_bit;
if (!highest_bit_set(enc, 19, 4, &highest_bit))
return false;
uint esize = 8 << highest_bit;
uint immhb_shf = extract_uint(enc, 16, 4 + highest_bit);
opnd_size_t shift_size;
switch (highest_bit) {
case 0: shift_size = OPSZ_3b; break;
case 1: shift_size = OPSZ_4b; break;
case 2: shift_size = OPSZ_5b; break;
case 3: shift_size = OPSZ_6b; break;
default: return false;
}
if (min_shift == 1)
*opnd = opnd_create_immed_int((2 * esize) - immhb_shf, shift_size);
else if (min_shift == 0)
*opnd = opnd_create_immed_int(immhb_shf - esize, shift_size);
else
return false;
*opnd = opnd_add_flags(*opnd, DR_OPND_IS_SHIFT);
return true;
}
static inline bool
immhb_shf_encode(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out,
uint min_shift)
{
opnd_size_t shift_size = opnd_get_size(opnd);
uint highest_bit;
switch (shift_size) {
case OPSZ_3b: highest_bit = 0; break;
case OPSZ_4b: highest_bit = 1; break;
case OPSZ_5b: highest_bit = 2; break;
case OPSZ_6b: highest_bit = 3; break;
default: return false;
}
ptr_int_t shift_amount;
uint esize = 8 << highest_bit;
if (!opnd_is_immed_int(opnd))
return false;
shift_amount = opnd_get_immed_int(opnd);
uint shift_encoding, max_shift;
if (min_shift == 0) {
shift_encoding = shift_amount + esize;
max_shift = esize - 1;
} else if (min_shift == 1) {
shift_encoding = esize * 2 - shift_amount;
max_shift = esize;
} else
return false;
if (shift_amount < min_shift || shift_amount > max_shift)
return false;
*enc_out = (shift_encoding << 16);
return true;
}
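/* Worked example (illustrative): immh:immb = 0001:111 selects byte
 * elements (esize = 8); with min_shift = 1 the decoded shift is
 * 2 * 8 - 15 = 1, and encoding shift 1 writes 15 back into bits 16-19.
 */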
/* immhb_shf: The vector encoding of #shift operand, with a minimum shift
 * of 1.
 */
static inline bool
decode_opnd_immhb_shf(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return immhb_shf_decode(enc, opcode, pc, opnd, 1);
}
static inline bool
encode_opnd_immhb_shf(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return immhb_shf_encode(enc, opcode, pc, opnd, enc_out, 1);
}
/* immhb_0shf: The vector encoding of #shift operand, with a minimum shift
 * of 0.
 */
static inline bool
decode_opnd_immhb_0shf(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return immhb_shf_decode(enc, opcode, pc, opnd, 0);
}
static inline bool
encode_opnd_immhb_0shf(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return immhb_shf_encode(enc, opcode, pc, opnd, enc_out, 0);
}
/* immhb_fxp: The vector encoding of #fbits operand. This is the number of bits
* after the decimal point for fixed-point values.
*/
static inline bool
decode_opnd_immhb_fxp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return immhb_shf_decode(enc, opcode, pc, opnd, 1);
}
static inline bool
encode_opnd_immhb_fxp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return immhb_shf_encode(enc, opcode, pc, opnd, enc_out, 1);
}
/* fpimm13: floating-point immediate for scalar fmov */
static inline bool
decode_opnd_fpimm13(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
/*
* From the Architecture Reference Manual, 8 bit immediate abcdefgh maps to
* floats:
*
* 3332 2222 2222 1111 1111 11
* 1098 7654 3210 9876 5432 1098 7654 3210
* _ abcd efgh <- 8 bit immediate mapped to
* abbb bbbc defg h000 0000 0000 0000 0000 <- 32 bit float
*
* abcd efgh Masks
* 0x1 0 a
* 0x4 0 b
* 0x2 0 c
* 0x1 F defgh
*/
if (extract_uint(enc, 22, 1) == 0) { /* 32 bits */
union {
float f;
uint32_t i;
} fpv;
uint32_t imm = extract_uint(enc, 13, 8);
uint32_t a = imm & 0x80;
uint32_t b = imm & 0x40;
uint32_t not_b = ((b == 0) ? 1 : 0);
uint32_t bbbbb = ((b == 0) ? 0 : 0x1f);
uint32_t c = imm & 0x20;
uint32_t defgh = imm & 0x1f;
uint32_t imm32 =
(a << 24) | (not_b << 30) | (bbbbb << 25) | (c << 19) | (defgh << 19);
fpv.i = imm32;
*opnd = opnd_create_immed_float(fpv.f);
} else { /* 64 bits */
/* 6666 5555 5555 5544 44444444 33333333 33322222 22221111 111111
* 3210 9876 5432 1098 76543210 98765432 10987654 32109876 54321098 76543210
* _ abcdefgh
* abbb bbbb bbcd efgh 00000000 00000000 00000000 00000000 00000000 00000000
*/
union {
double d;
uint64_t i;
} fpv;
uint64_t imm = extract_uint(enc, 13, 8);
uint64_t a = imm & 0x80;
uint64_t b = imm & 0x40;
uint64_t not_b = ((b == 0) ? 1 : 0);
uint64_t bbbbbbbb = ((b == 0) ? 0 : 0xff);
uint64_t c = imm & 0x20;
uint64_t defgh = imm & 0x1f;
uint64_t imm64 =
(a << 56) | (not_b << 62) | (bbbbbbbb << 54) | (c << 48) | (defgh << 48);
fpv.i = imm64;
*opnd = opnd_create_immed_double(fpv.d);
}
return true;
}
static inline bool
encode_opnd_fpimm13(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
/*
* From the Architecture Reference Manual, 8 bit immediate abcdefgh maps to
* floats:
*
* 3332 2222 2222 1111 1111 11
* 1098 7654 3210 9876 5432 1098 7654 3210
* _
* abbb bbbc defg h000 0000 0000 0000 0000
* 0x8 0 0 0 0 0 0 0 a
* 0x1 0 0 0 0 0 0 0 b
* 0x0 1 0 0 0 0 0 0 c
* 0x0 0 f 8 0 0 0 0 defgh
*/
if (opnd_is_immed_float(opnd)) {
ASSERT(extract_uint(enc, 22, 1) == 0); /* 32 bit floating point */
union {
float f;
uint32_t i;
} fpv;
fpv.f = opnd_get_immed_float(opnd);
uint32_t imm = fpv.i;
uint a = (imm & 0x80000000);
uint b = (imm & 0x10000000);
uint c = (imm & 0x01000000);
uint defgh = (imm & 0x00f80000);
/* 3332 2222 2222 1111 1111 11
* 1098 7654 3210 9876 5432 1098 7654 3210
* ---- ---- ---a bcde fgh- ---- ---- ---- immediate encoding
* |-----11---->| 0x80000000 a
* |-----9---->| 0x10000000 b
* |---6-->| 0x01000000 c
* |--6-->| 0x00f80000 defgh
*/
*enc_out = (a >> 11) | (b >> 9) | (c >> 6) | (defgh >> 6);
} else if (opnd_is_immed_double(opnd)) {
ASSERT(extract_uint(enc, 22, 1) == 1); /* 64 bit floating point */
/* 6666 5555 5555 5544 44444444 33333333 33322222 22221111 111111
* 3210 9876 5432 1098 76543210 98765432 10987654 32109876 54321098 76543210
* _
* abbb bbbb bbcd efgh 00000000 00000000 00000000 00000000 00000000 00000000
*
* ---- ---- ---a bcde fgh----- -------- immediate encoding
*/
union {
double d;
uint64_t i;
} fpv;
fpv.d = opnd_get_immed_double(opnd);
uint64_t imm = fpv.i;
uint64_t a = (imm & 0x8000000000000000);
uint64_t b = (imm & 0x1000000000000000);
uint64_t c = (imm & 0x0020000000000000);
uint64_t defgh = (imm & 0x001f000000000000);
*enc_out =
(((a >> 11) | (b >> 9) | (c >> 3) | (defgh >> 3)) & 0xffffffff00000000) >> 32;
} else
return false;
return true;
}
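/* Worked example (illustrative): imm8 = 0x70 yields 0x3f800000 (1.0f) in
 * the 32-bit case and 0x3ff0000000000000 (1.0) in the 64-bit case.
 */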
/* b_sz: Vector element width for SIMD instructions. */
static inline bool
decode_opnd_b_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint bits = enc >> 22 & 3;
if (bits != 0)
return false;
*opnd = opnd_create_immed_int(bits, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_b_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
ptr_int_t val = opnd_get_immed_int(opnd);
if (val != 0)
return false;
*enc_out = val << 22;
return true;
}
/* hs_sz: Vector element width for SIMD instructions. */
static inline bool
decode_opnd_hs_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint bits = enc >> 22 & 3;
if (bits != 1 && bits != 2)
return false;
*opnd = opnd_create_immed_int(bits, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_hs_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
ptr_int_t val = opnd_get_immed_int(opnd);
if (val < 1 || val > 2)
return false;
*enc_out = val << 22;
return true;
}
/* bhs_sz: Vector element width for SIMD instructions. */
static inline bool
decode_opnd_bhs_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint bits = enc >> 22 & 3;
if (bits != 0 && bits != 1 && bits != 2)
return false;
*opnd = opnd_create_immed_int(bits, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_bhs_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
ptr_int_t val = opnd_get_immed_int(opnd);
if (val < 0 || val > 2)
return false;
*enc_out = val << 22;
return true;
}
/* bhsd_sz: Vector element width for SIMD instructions. */
static inline bool
decode_opnd_bhsd_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint bits = enc >> 22 & 3;
*opnd = opnd_create_immed_int(bits, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_bhsd_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_immed_int(opnd))
return false;
ptr_int_t val = opnd_get_immed_int(opnd);
if (val < 0 || val > 3)
return false;
*enc_out = val << 22;
return true;
}
/* bd_sz: Vector element width for SIMD instructions. */
static inline bool
decode_opnd_bd_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint bits = enc >> 22 & 3;
if (bits != 0 && bits != 3)
return false;
*opnd = opnd_create_immed_int(bits, OPSZ_2b);
return true;
}
static inline bool
encode_opnd_bd_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
ptr_int_t val = opnd_get_immed_int(opnd);
if (val != 0 && val != 3)
return false;
*enc_out = val << 22;
return true;
}
/* shift3: shift type for ADD/SUB: LSL, LSR or ASR */
static inline bool
decode_opnd_shift3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (extract_uint(enc, 22, 2) == 3)
return false;
return decode_opnd_int(22, 2, false, 0, OPSZ_3b, DR_OPND_IS_SHIFT, enc, opnd);
}
static inline bool
encode_opnd_shift3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(22, 2, false, 0, DR_OPND_IS_SHIFT, opnd, &t) ||
extract_uint(t, 22, 2) == 3)
return false;
*enc_out = t;
return true;
}
/* shift4: shift type for logical operation: LSL, LSR, ASR or ROR */
static inline bool
decode_opnd_shift4(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(22, 2, false, 0, OPSZ_3b, DR_OPND_IS_SHIFT, enc, opnd);
}
static inline bool
encode_opnd_shift4(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(22, 2, false, 0, DR_OPND_IS_SHIFT, opnd, enc_out);
}
static inline bool
decode_scalar_size_regx(uint size_offset, int rpos, uint enc, int opcode, byte *pc,
OUT opnd_t *opnd)
{
uint size = extract_uint(enc, 22, 2);
if (size > (3 - size_offset)) /* size is unsigned, so no lower-bound check */
return false;
return decode_opnd_vector_reg(rpos, size + size_offset, enc, opnd);
}
static inline bool
encode_scalar_size_regx(uint size_offset, int rpos, uint enc, int opcode, byte *pc,
opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_reg(opnd))
return false;
reg_t reg = opnd_get_reg(opnd);
aarch64_reg_offset offset = get_reg_offset(reg);
if (offset > DOUBLE_REG) {
return false;
}
bool reg_written = encode_opnd_vector_reg(rpos, offset, opnd, enc_out);
*enc_out |= (offset - size_offset) << 22;
return reg_written;
}
static inline bool
decode_hsd_size_regx(int rpos, uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_scalar_size_regx(1, rpos, enc, opcode, pc, opnd);
}
static inline bool
encode_hsd_size_regx(int rpos, uint enc, int opcode, byte *pc, opnd_t opnd,
OUT uint *enc_out)
{
return encode_scalar_size_regx(1, rpos, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_bhsd_size_regx(int rpos, uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_scalar_size_regx(0, rpos, enc, opcode, pc, opnd);
}
static inline bool
encode_bhsd_size_regx(int rpos, uint enc, int opcode, byte *pc, opnd_t opnd,
OUT uint *enc_out)
{
return encode_scalar_size_regx(0, rpos, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_opnd_float_reg0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_float_reg(0, enc, opnd);
}
static inline bool
encode_opnd_float_reg0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_float_reg(0, opnd, enc_out);
}
static inline bool
decode_opnd_hsd_size_reg0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_hsd_size_regx(0, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_hsd_size_reg0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_hsd_size_regx(0, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_opnd_bhsd_size_reg0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_bhsd_size_regx(0, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_bhsd_size_reg0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_bhsd_size_regx(0, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_opnd_float_reg5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_float_reg(5, enc, opnd);
}
static inline bool
encode_opnd_float_reg5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_float_reg(5, opnd, enc_out);
}
static inline bool
decode_opnd_hsd_size_reg5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_hsd_size_regx(5, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_hsd_size_reg5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_hsd_size_regx(5, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_opnd_bhsd_size_reg5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_bhsd_size_regx(5, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_bhsd_size_reg5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_bhsd_size_regx(5, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_opnd_float_reg10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_float_reg(10, enc, opnd);
}
static inline bool
encode_opnd_float_reg10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_float_reg(10, opnd, enc_out);
}
static inline bool
decode_opnd_float_reg16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_float_reg(16, enc, opnd);
}
static inline bool
encode_opnd_float_reg16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_float_reg(16, opnd, enc_out);
}
static inline bool
decode_opnd_hsd_size_reg16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_hsd_size_regx(16, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_hsd_size_reg16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_hsd_size_regx(16, enc, opcode, pc, opnd, enc_out);
}
static inline bool
decode_opnd_bhsd_size_reg16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_bhsd_size_regx(16, enc, opcode, pc, opnd);
}
static inline bool
encode_opnd_bhsd_size_reg16(uint enc, int opcode, byte *pc, opnd_t opnd,
OUT uint *enc_out)
{
return encode_bhsd_size_regx(16, enc, opcode, pc, opnd, enc_out);
}
/* mem0p: as mem0, but a pair of registers, so double size */
static inline bool
decode_opnd_mem0p(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem0_scale(extract_uint(enc, 30, 1) + 3, enc, opnd);
}
static inline bool
encode_opnd_mem0p(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem0_scale(extract_uint(enc, 30, 1) + 3, opnd, enc_out);
}
/* x16imm: immediate operand for SIMD load/store multiple structures (post-indexed) */
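/* Our reading of the post-indexed forms, mirrored by the decode/encode pair
 * below: an Rm field of 31 selects an immediate post-index equal to the
 * total transfer size, while any other value selects post-indexing by
 * register Xm.
 */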
static inline bool
decode_opnd_x16imm(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int num = extract_uint(enc, 16, 5);
if (num < 31)
*opnd = opnd_create_reg(DR_REG_X0 + num);
else {
int bytes = (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc);
*opnd = opnd_create_immed_int(bytes, OPSZ_1);
}
return true;
}
static inline bool
encode_opnd_x16imm(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_is_reg(opnd)) {
uint num = opnd_get_reg(opnd) - DR_REG_X0;
if (num == 31)
return false;
*enc_out = num << 16;
return true;
} else if (opnd_is_immed_int(opnd)) {
ptr_int_t bytes = opnd_get_immed_int(opnd);
if (bytes != (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc))
return false;
*enc_out = 31U << 16;
return true;
}
return false;
}
/* index3: index of D subreg in Q register: 0-1 */
static inline bool
decode_opnd_index3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(3, enc, opnd);
}
static inline bool
encode_opnd_index3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(3, opnd, enc_out);
}
/* dq0: D/Q register at bit position 0; bit 30 selects Q reg */
static inline bool
decode_opnd_dq0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(0, 0, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(0, 0, 30, opnd, enc_out);
}
/* dq0p1: as dq0 but add 1 mod 32 to reg number */
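/* The +1/+2/+3 (mod 32) variants supply the consecutive registers of a
 * multi-register operand; the mod-32 wrap matches the architectural
 * wrap-around of vector register lists (e.g. a list starting at V31
 * continues at V0).
 */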
static inline bool
decode_opnd_dq0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(1, 0, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(1, 0, 30, opnd, enc_out);
}
/* dq0p2: as dq0 but add 2 mod 32 to reg number */
static inline bool
decode_opnd_dq0p2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(2, 0, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0p2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(2, 0, 30, opnd, enc_out);
}
/* dq0p3: as dq0 but add 3 mod 32 to reg number */
static inline bool
decode_opnd_dq0p3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(3, 0, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0p3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(3, 0, 30, opnd, enc_out);
}
/* vt0: first register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(0, enc, opnd);
}
static inline bool
encode_opnd_vt0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(0, enc, opnd, enc_out);
}
/* vt1: second register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(1, enc, opnd);
}
static inline bool
encode_opnd_vt1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(1, enc, opnd, enc_out);
}
/* vt2: third register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(2, enc, opnd);
}
static inline bool
encode_opnd_vt2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(2, enc, opnd, enc_out);
}
/* vt3: fourth register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(3, enc, opnd);
}
static inline bool
encode_opnd_vt3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(3, enc, opnd, enc_out);
}
/* dq5: D/Q register at bit position 5; bit 30 selects Q reg */
static inline bool
decode_opnd_dq5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(0, 5, 30, enc, opnd);
}
static inline bool
encode_opnd_dq5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(0, 5, 30, opnd, enc_out);
}
/* index2: index of S subreg in Q register: 0-3 */
static inline bool
decode_opnd_index2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(2, enc, opnd);
}
static inline bool
encode_opnd_index2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(2, opnd, enc_out);
}
/* index1: index of H subreg in Q register: 0-7 */
static inline bool
decode_opnd_index1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(1, enc, opnd);
}
static inline bool
encode_opnd_index1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(1, opnd, enc_out);
}
/* index0: index of B subreg in Q register: 0-15 */
static inline bool
decode_opnd_index0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(0, enc, opnd);
}
static inline bool
encode_opnd_index0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(0, opnd, enc_out);
}
/* memvm: memory operand for SIMD load/store multiple structures */
static inline bool
decode_opnd_memvm(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvm(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
int regs = multistruct_regcount(enc);
opnd_size_t size;
uint rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0)
return false;
size = opnd_get_size(opnd);
if (size != opnd_size_from_bytes(regs * 8) && size != opnd_size_from_bytes(regs * 16))
return false;
*enc_out = rn << 5 | (uint)(size == opnd_size_from_bytes(regs * 16)) << 30;
return true;
}
/* dq16_h_sz: D/Q register at bit position 16 with 4 bits only, for the FP16
* by-element encoding; bit 30 selects Q reg
*/
static inline bool
decode_opnd_dq16_h_sz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg((TEST(1U << 30, enc) ? DR_REG_Q0 : DR_REG_D0) +
extract_uint(enc, 16, 4));
return true;
}
static inline bool
encode_opnd_dq16_h_sz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint num;
bool q;
if (!opnd_is_reg(opnd))
return false;
q = (uint)(opnd_get_reg(opnd) - DR_REG_Q0) < 16;
num = opnd_get_reg(opnd) - (q ? DR_REG_Q0 : DR_REG_D0);
if (num >= 16)
return false;
*enc_out = num << 16 | (uint)q << 30;
return true;
}
/* wx0_imm5_q: bits 0-4 are a GPR whose width depends on information in
 * an imm5 field at bits 16-20 and the Q bit at bit 30.
 */
static inline bool
decode_opnd_wx0_imm5_q(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint imm5 = extract_int(enc, 16, 5);
bool is_x_register = (get_imm5_offset(imm5) == 3);
*opnd = opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), is_x_register, false));
return true;
}
static inline bool
encode_opnd_wx0_imm5_q(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint num = 0;
bool is_x = false;
if (!opnd_is_reg(opnd))
ASSERT(false);
if (!encode_reg(&num, &is_x, opnd_get_reg(opnd), false))
ASSERT(false);
*enc_out = num;
if (is_x)
*enc_out |= (1 << 30);
return true;
}
/* dq16: D/Q register at bit position 16; bit 30 selects Q reg */
static inline bool
decode_opnd_dq16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(0, 16, 30, enc, opnd);
}
static inline bool
encode_opnd_dq16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(0, 16, 30, opnd, enc_out);
}
/* imm6: shift amount for logical and arithmetical instructions */
static inline bool
decode_opnd_imm6(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (!TEST(1U << 31, enc) && TEST(1U << 15, enc))
return false;
return decode_opnd_int(10, 6, false, 0, OPSZ_6b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm6(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!TEST(1U << 31, enc) && TEST(1U << 15, enc))
return false;
return encode_opnd_int(10, 6, false, 0, 0, opnd, enc_out);
}
/* imms: second immediate operand for bitfield operation */
static inline bool
decode_opnd_imms(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_imm_bf(10, enc, opnd);
}
static inline bool
encode_opnd_imms(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_imm_bf(10, enc, opnd, enc_out);
}
/* immr: first immediate operand for bitfield operation */
static inline bool
decode_opnd_immr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_imm_bf(16, enc, opnd);
}
static inline bool
encode_opnd_immr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_imm_bf(16, enc, opnd, enc_out);
}
/* imm16sh: shift amount for 16-bit immediate of MOVK/MOVN/MOVZ/SVC */
static inline bool
decode_opnd_imm16sh(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (!TEST(1U << 31, enc) && TEST(1U << 22, enc))
return false;
return decode_opnd_int(21, 2, false, 4, OPSZ_6b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm16sh(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(21, 2, false, 4, 0, opnd, &t) ||
(!TEST(1U << 31, enc) && TEST(1U << 22, t)))
return false;
*enc_out = t;
return true;
}
/* mem0: memory operand with no offset, gets size from bits 30 and 31 */
static inline bool
decode_opnd_mem0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem0_scale(extract_uint(enc, 30, 2), enc, opnd);
}
static inline bool
encode_opnd_mem0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem0_scale(extract_uint(enc, 30, 2), opnd, enc_out);
}
/* mem9post: post-indexed mem9, so offset is zero */
static inline bool
decode_opnd_mem9post(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), true, enc, opnd);
}
static inline bool
encode_opnd_mem9post(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), true, opnd, enc_out);
}
/* mem9: memory operand with 9-bit offset; gets size from bits 30 and 31 */
static inline bool
decode_opnd_mem9(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), false, enc, opnd);
}
static inline bool
encode_opnd_mem9(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), false, opnd, enc_out);
}
/* memreg: memory operand with register offset; gets size from bits 30 and 31 */
static inline bool
decode_opnd_memreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_memreg_size(opnd_size_from_bytes(1 << extract_uint(enc, 30, 2)),
enc, opnd);
}
static inline bool
encode_opnd_memreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_memreg_size(opnd_size_from_bytes(1 << extract_uint(enc, 30, 2)),
opnd, enc_out);
}
/* mem12: memory operand with 12-bit offset; gets size from bits 30 and 31 */
static inline bool
decode_opnd_mem12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem12_scale(extract_uint(enc, 30, 2), false, enc, opnd);
}
static inline bool
encode_opnd_mem12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem12_scale(extract_uint(enc, 30, 2), false, opnd, enc_out);
}
/* mem7post: post-indexed mem7, so offset is zero */
static inline bool
decode_opnd_mem7post(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem7_postindex(true, enc, opnd);
}
static inline bool
encode_opnd_mem7post(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem7_postindex(true, enc, opnd, enc_out);
}
/* mem7off: just the 7-bit offset from mem7 */
static inline bool
decode_opnd_mem7off(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(15, 7, true, mem7_scale(enc), OPSZ_PTR, 0, enc, opnd);
}
static inline bool
encode_opnd_mem7off(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(15, 7, true, mem7_scale(enc), 0, opnd, enc_out);
}
/* mem7: memory operand with 7-bit offset; gets size from bits 26, 30 and 31 */
static inline bool
decode_opnd_mem7(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem7_postindex(false, enc, opnd);
}
static inline bool
encode_opnd_mem7(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem7_postindex(false, enc, opnd, enc_out);
}
/* memlit: memory operand for literal load; gets size from bits 26, 30 and 31 */
static inline bool
decode_opnd_memlit(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_rel_addr(pc + 4 * extract_int(enc, 5, 19), memlit_size(enc));
return true;
}
static inline bool
encode_opnd_memlit(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
ptr_uint_t off;
if (!opnd_is_rel_addr(opnd) || opnd_get_size(opnd) != memlit_size(enc))
return false;
off = (byte *)opnd_get_addr(opnd) - pc;
if ((off & 3) != 0 || off + (1U << 20) >= 1U << 21)
return false;
*enc_out = (off >> 2 & 0x7ffff) << 5;
return true;
}
/* wx0: W/X register or WZR/XZR at bit position 0; bit 31 selects X reg */
static inline bool
decode_opnd_wx0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 0, enc, opnd);
}
static inline bool
encode_opnd_wx0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 0, opnd, enc_out);
}
/* wx0sp: W/X register or WSP/XSP at bit position 0; bit 31 selects X reg */
static inline bool
decode_opnd_wx0sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(true, 0, enc, opnd);
}
static inline bool
encode_opnd_wx0sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(true, 0, opnd, enc_out);
}
/* wx5: W/X register or WZR/XZR at bit position 5; bit 31 selects X reg */
static inline bool
decode_opnd_wx5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 5, enc, opnd);
}
static inline bool
encode_opnd_wx5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 5, opnd, enc_out);
}
/* wx5sp: W/X register or WSP/XSP at bit position 5; bit 31 selects X reg */
static inline bool
decode_opnd_wx5sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(true, 5, enc, opnd);
}
static inline bool
encode_opnd_wx5sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(true, 5, opnd, enc_out);
}
/* wx10: W/X register or WZR/XZR at bit position 10; bit 31 selects X reg */
static inline bool
decode_opnd_wx10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 10, enc, opnd);
}
static inline bool
encode_opnd_wx10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 10, opnd, enc_out);
}
/* wx16: W/X register or WZR/XZR at bit position 16; bit 31 selects X reg */
static inline bool
decode_opnd_wx16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 16, enc, opnd);
}
static inline bool
encode_opnd_wx16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 16, opnd, enc_out);
}
/*******************************************************************************
* Pairs of functions for decoding and encoding opndsets, as listed in "codec.txt".
* Currently all branch instructions are handled in this way.
*/
/* adr: used for ADR and ADRP */
static inline bool
decode_opnds_adr(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
opnd_t opnd;
if (!decode_opnd_adr_page(opcode == OP_adrp ? 12 : 0, enc, pc, &opnd))
return false;
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_dst(instr, 0,
opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), true, false)));
instr_set_src(instr, 0, opnd);
return true;
}
static inline uint
encode_opnds_adr(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
int opcode = instr_get_opcode(instr);
uint rd, adr;
if (instr_num_dsts(instr) == 1 && instr_num_srcs(instr) == 1 &&
encode_opnd_adr_page(opcode == OP_adrp ? 12 : 0, pc, instr_get_src(instr, 0),
&adr, instr, di) &&
encode_opnd_wxn(true, false, 0, instr_get_dst(instr, 0), &rd))
return (enc | adr | rd);
return ENCFAIL;
}
/* b: used for B and BL */
static inline bool
decode_opnds_b(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
if (opcode == OP_bl) {
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_dst(instr, 0, opnd_create_reg(DR_REG_X30));
} else
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 0, 26) * 4));
return true;
}
static inline uint
encode_opnds_b(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
int opcode = instr_get_opcode(instr);
bool is_bl = (opcode == OP_bl);
uint off, x30;
if (instr_num_dsts(instr) == (is_bl ? 1 : 0) && instr_num_srcs(instr) == 1 &&
(!is_bl || encode_opnd_impx30(enc, opcode, pc, instr_get_dst(instr, 0), &x30)) &&
encode_pc_off(&off, 26, pc, instr, instr_get_src(instr, 0), di))
return (enc | off);
return ENCFAIL;
}
/* bcond: used for B.cond */
static inline bool
decode_opnds_bcond(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 19) * 4));
instr_set_predicate(instr, DR_PRED_EQ + (enc & 15));
return true;
}
static inline uint
encode_opnds_bcond(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
uint off;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 1 &&
encode_pc_off(&off, 19, pc, instr, instr_get_src(instr, 0), di) &&
(uint)(instr_get_predicate(instr) - DR_PRED_EQ) < 16)
return (enc | off << 5 | (instr_get_predicate(instr) - DR_PRED_EQ));
return ENCFAIL;
}
/* ccm: operands for conditional compare instructions */
static inline bool
decode_opnds_ccm(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 3);
/* Rn */
opnd_t rn;
if (!decode_opnd_rn(false, 5, enc, &rn))
return false;
instr_set_src(instr, 0, rn);
opnd_t rm;
if (TEST(1U << 11, enc)) /* imm5 */
instr_set_src(instr, 1, opnd_create_immed_int(extract_uint(enc, 16, 5), OPSZ_5b));
else if (!decode_opnd_rn(false, 16, enc, &rm)) /* Rm */
return false;
else
instr_set_src(instr, 1, rm);
/* nzcv */
instr_set_src(instr, 2, opnd_create_immed_int(extract_uint(enc, 0, 4), OPSZ_4b));
/* cond */
instr_set_predicate(instr, DR_PRED_EQ + extract_uint(enc, 12, 4));
return true;
}
static inline uint
encode_opnds_ccm(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
uint rn;
uint rm_imm5 = 0;
uint imm5_flag = 0;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 3 &&
encode_opnd_rn(false, 5, instr_get_src(instr, 0), &rn) && /* Rn */
opnd_is_immed_int(instr_get_src(instr, 2)) && /* nzcv */
(uint)(instr_get_predicate(instr) - DR_PRED_EQ) < 16) { /* cond */
uint nzcv = opnd_get_immed_int(instr_get_src(instr, 2));
uint cond = instr_get_predicate(instr) - DR_PRED_EQ;
if (opnd_is_immed_int(instr_get_src(instr, 1))) { /* imm5 */
rm_imm5 = opnd_get_immed_int(instr_get_src(instr, 1)) << 16;
imm5_flag = 1;
} else if (opnd_is_reg(instr_get_src(instr, 1))) { /* Rm */
encode_opnd_rn(false, 16, instr_get_src(instr, 1), &rm_imm5);
} else
return ENCFAIL;
return (enc | nzcv | rn | (imm5_flag << 11) | rm_imm5 | (cond << 12));
}
return ENCFAIL;
}
/* cbz: used for CBNZ and CBZ */
static inline bool
decode_opnds_cbz(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 2);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 19) * 4));
instr_set_src(
instr, 1,
opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), TEST(1U << 31, enc), false)));
return true;
}
static inline uint
encode_opnds_cbz(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
uint rt, off;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 2 &&
encode_pc_off(&off, 19, pc, instr, instr_get_src(instr, 0), di) &&
encode_opnd_rn(false, 0, instr_get_src(instr, 1), &rt))
return (enc | off << 5 | rt);
return ENCFAIL;
}
/* logic_imm: used for AND, ANDS, EOR and ORR.
* Logical (immediate) instructions are awkward because there are sometimes
* many ways of representing the same immediate value. We add the raw encoding
* as an additional operand when the encoding is not the canonical one.
*/
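/* Illustrative example (our understanding of one source of redundancy): for
 * an element size of 2 the rotate amount is only significant modulo 2, so
 * two distinct immr values can decode to the same bitmask while only one of
 * them is what encode_bitmask() would produce; keeping the raw encoding as
 * an extra operand makes re-encoding lossless in that case.
 */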
static inline bool
decode_opnds_logic_imm(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr,
int opcode)
{
bool is_x = TEST(1U << 31, enc);
uint imm_enc = extract_uint(enc, 10, 13); /* encoding of bitmask */
ptr_uint_t imm_val = decode_bitmask(imm_enc); /* value of bitmask */
bool canonical = encode_bitmask(imm_val) == imm_enc;
if (imm_val == 0 || (!is_x && TEST(1U << 12, imm_enc)))
return false;
if (!is_x)
imm_val &= 0xffffffff;
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 1, 2 + (canonical ? 0 : 1));
instr_set_dst(
instr, 0,
opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), is_x, opcode != OP_ands)));
instr_set_src(instr, 0,
opnd_create_reg(decode_reg(extract_uint(enc, 5, 5), is_x, false)));
instr_set_src(instr, 1, opnd_create_immed_uint(imm_val, is_x ? OPSZ_8 : OPSZ_4));
if (!canonical)
instr_set_src(instr, 2, opnd_create_immed_uint(imm_enc, OPSZ_2));
return true;
}
static inline uint
encode_opnds_logic_imm(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
int opcode = instr_get_opcode(instr);
int srcs = instr_num_srcs(instr);
opnd_t opnd_val;
ptr_uint_t imm_val;
uint rd, rn;
if (srcs < 2 || srcs > 3 || instr_num_dsts(instr) != 1)
return ENCFAIL;
opnd_val = instr_get_src(instr, 1);
if (!encode_opnd_rn(opcode != OP_ands, 0, instr_get_dst(instr, 0), &rd) ||
!encode_opnd_rn(false, 5, instr_get_src(instr, 0), &rn) ||
TEST(1U << 31, rd ^ rn) || !opnd_is_immed_int(opnd_val))
return ENCFAIL;
imm_val = opnd_get_immed_int(opnd_val);
if (!TEST(1U << 31, rd)) {
if ((imm_val >> 32) != 0)
return ENCFAIL;
imm_val |= imm_val << 32;
}
if (srcs == 3) {
opnd_t opnd_enc = instr_get_src(instr, 2);
ptr_int_t imm_enc;
if (!opnd_is_immed_int(opnd_enc))
return ENCFAIL;
imm_enc = opnd_get_immed_int(opnd_enc);
if (imm_enc < 0 || imm_enc > 0x1fff || decode_bitmask(imm_enc) != imm_val)
return ENCFAIL;
return (enc | rd | rn | (uint)imm_enc << 10);
} else {
int imm_enc = encode_bitmask(imm_val);
if (imm_enc < 0)
return ENCFAIL;
return (enc | rd | rn | (uint)imm_enc << 10);
}
}
/* msr: used for MSR.
* With MSR the destination register may or may not be one of the system registers
* that we recognise.
*/
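/* For example, "MSR NZCV, X1" decodes with DR_REG_NZCV as the destination,
 * whereas an unrecognised system register is kept as an immediate and
 * becomes a second source operand instead; both layouts appear below.
 */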
static inline bool
decode_opnds_msr(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
opnd_t opnd = decode_sysreg(extract_uint(enc, 5, 15));
instr_set_opcode(instr, opcode);
if (opnd_is_reg(opnd)) {
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_dst(instr, 0, opnd);
} else {
instr_set_num_opnds(dcontext, instr, 0, 2);
instr_set_src(instr, 1, opnd);
}
instr_set_src(instr, 0,
opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), true, false)));
return true;
}
static inline uint
encode_opnds_msr(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
uint imm15, xt;
if (instr_num_dsts(instr) == 1 && instr_num_srcs(instr) == 1 &&
opnd_is_reg(instr_get_dst(instr, 0)) &&
encode_sysreg(&imm15, instr_get_dst(instr, 0)) &&
encode_opnd_wxn(true, false, 0, instr_get_src(instr, 0), &xt))
return (enc | xt | imm15 << 5);
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 2 &&
opnd_is_immed_int(instr_get_src(instr, 1)) &&
encode_opnd_wxn(true, false, 0, instr_get_src(instr, 0), &xt) &&
encode_sysreg(&imm15, instr_get_src(instr, 1)))
return (enc | xt | imm15 << 5);
return ENCFAIL;
}
/* tbz: used for TBNZ and TBZ */
static inline bool
decode_opnds_tbz(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 3);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 14) * 4));
instr_set_src(instr, 1,
opnd_create_reg(decode_reg(extract_uint(enc, 0, 5), true, false)));
instr_set_src(instr, 2,
opnd_create_immed_int((enc >> 19 & 31) | (enc >> 26 & 32), OPSZ_5b));
return true;
}
static inline uint
encode_opnds_tbz(byte *pc, instr_t *instr, uint enc, decode_info_t *di)
{
uint xt, imm6, off;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 3 &&
encode_pc_off(&off, 14, pc, instr, instr_get_src(instr, 0), di) &&
encode_opnd_wxn(true, false, 0, instr_get_src(instr, 1), &xt) &&
encode_opnd_int(0, 6, false, 0, 0, instr_get_src(instr, 2), &imm6))
return (enc | off << 5 | xt | (imm6 & 31) << 19 | (imm6 & 32) << 26);
return ENCFAIL;
}
/******************************************************************************/
/* Include automatically generated decoder and encoder. */
#include "decode_gen.h"
#include "encode_gen.h"
/******************************************************************************/
byte *
decode_common(dcontext_t *dcontext, byte *pc, byte *orig_pc, instr_t *instr)
{
byte *next_pc = pc + 4;
uint enc = *(uint *)pc;
uint eflags = 0;
int opc;
CLIENT_ASSERT(instr->opcode == OP_INVALID || instr->opcode == OP_UNDECODED,
"decode: instr is already decoded, may need to call instr_reset()");
if (!decoder(enc, dcontext, orig_pc, instr)) {
/* This clause handles undefined HINT instructions. See the comment
* 'Notes on specific instructions' in codec.txt for details. If the
* decoder reads an undefined hint, a message with the unallocated
* CRm:op2 field value is output and the encoding converted into a NOP
* instruction.
*/
if ((enc & 0xfffff01f) == 0xd503201f) {
SYSLOG_INTERNAL_WARNING("Undefined HINT instruction found: "
"encoding 0x%x (CRm:op2 0x%x)\n",
enc, (enc & 0xfe0) >> 5);
instr_set_opcode(instr, OP_nop);
instr_set_num_opnds(dcontext, instr, 0, 0);
} else {
/* We use OP_xx for instructions not yet handled by the decoder.
* If an A64 instruction accesses a general-purpose register
* (except X30) then the number of that register appears in one
* of four possible places in the instruction word, so we can
* pessimistically assume that an unrecognised instruction reads
* and writes all four of those registers, and this is
* sufficient to enable correct (though often excessive) mangling.
*/
instr_set_opcode(instr, OP_xx);
instr_set_num_opnds(dcontext, instr, 4, 5);
instr->src0 = OPND_CREATE_INT32(enc);
instr->srcs[0] = opnd_create_reg(DR_REG_X0 + (enc & 31));
instr->dsts[0] = opnd_create_reg(DR_REG_X0 + (enc & 31));
instr->srcs[1] = opnd_create_reg(DR_REG_X0 + (enc >> 5 & 31));
instr->dsts[1] = opnd_create_reg(DR_REG_X0 + (enc >> 5 & 31));
instr->srcs[2] = opnd_create_reg(DR_REG_X0 + (enc >> 10 & 31));
instr->dsts[2] = opnd_create_reg(DR_REG_X0 + (enc >> 10 & 31));
instr->srcs[3] = opnd_create_reg(DR_REG_X0 + (enc >> 16 & 31));
instr->dsts[3] = opnd_create_reg(DR_REG_X0 + (enc >> 16 & 31));
}
}
/* XXX i#2374: This determination of flag usage should be separate from the
* decoding of operands.
*
* Apart from explicit read/write from/to flags register using MRS and MSR,
* a field in codec.txt specifies whether instructions read/write from/to
* flags register.
*/
opc = instr_get_opcode(instr);
if (opc == OP_mrs && instr_num_srcs(instr) == 1 &&
opnd_is_reg(instr_get_src(instr, 0)) &&
opnd_get_reg(instr_get_src(instr, 0)) == DR_REG_NZCV) {
eflags |= EFLAGS_READ_NZCV;
}
if (opc == OP_msr && instr_num_dsts(instr) == 1 &&
opnd_is_reg(instr_get_dst(instr, 0)) &&
opnd_get_reg(instr_get_dst(instr, 0)) == DR_REG_NZCV) {
eflags |= EFLAGS_WRITE_NZCV;
}
/* XXX i#2626: Until the decoder for AArch64 covers all the instructions that
* read/write aflags, as a workaround conservatively assume that all OP_xx
* instructions (i.e., unrecognized instructions) may read/write aflags.
*/
if (opc == OP_xx) {
eflags |= EFLAGS_READ_ARITH;
eflags |= EFLAGS_WRITE_ARITH;
}
instr->eflags |= eflags;
instr_set_eflags_valid(instr, true);
instr_set_operands_valid(instr, true);
if (orig_pc != pc) {
/* We do not want to copy when encoding and condone an invalid
* relative target.
* TODO i#4016: Add re-relativization support without having to re-encode.
*/
instr_set_raw_bits_valid(instr, false);
instr_set_translation(instr, orig_pc);
} else {
/* We set raw bits AFTER setting all srcs and dsts because setting
* a src or dst marks instr as having invalid raw bits.
*/
ASSERT(CHECK_TRUNCATE_TYPE_uint(next_pc - pc));
instr_set_raw_bits(instr, pc, (uint)(next_pc - pc));
}
return next_pc;
}
uint
encode_common(byte *pc, instr_t *i, decode_info_t *di)
{
ASSERT(((ptr_int_t)pc & 3) == 0);
return encoder(pc, i, di);
}
| 1 | 25,823 | Is this the correct place for this assert? Does this routine rely on it being 64, or was this only for testing? | DynamoRIO-dynamorio | c |
@@ -3,7 +3,7 @@ package hardware
import (
"github.com/shirou/gopsutil/mem"
"github.com/sonm-io/core/insonmnia/hardware/cpu"
- "github.com/sonm-io/core/insonmnia/hardware/gpu"
+ pb "github.com/sonm-io/core/proto"
)
// Hardware accumulates the finest hardware information about system the miner | 1 | package hardware
import (
"github.com/shirou/gopsutil/mem"
"github.com/sonm-io/core/insonmnia/hardware/cpu"
"github.com/sonm-io/core/insonmnia/hardware/gpu"
)
// Hardware accumulates the finest hardware information about the system the
// miner is running on.
type Hardware struct {
CPU []cpu.Device
Memory *mem.VirtualMemoryStat
GPU []gpu.Device
}
// LogicalCPUCount returns the number of logical CPUs in the system.
func (h *Hardware) LogicalCPUCount() int {
count := 0
for _, c := range h.CPU {
count += int(c.Cores)
}
return count
}
// TotalMemory returns the total number of bytes.
func (h *Hardware) TotalMemory() uint64 {
return h.Memory.Total
}
// HasGPU returns true if a system has GPU on the board.
func (h *Hardware) HasGPU() bool {
return len(h.GPU) > 0
}
type HardwareInfo interface {
// CPU returns information about system CPU.
//
// This includes vendor name, model name, number of cores, cache info,
// instruction flags and many others to be able to identify and to properly
// account the CPU.
CPU() ([]cpu.Device, error)
// Memory returns information about system memory.
//
// This includes total physical memory, available memory and many others,
// expressed in bytes.
Memory() (*mem.VirtualMemoryStat, error)
// GPU returns information about GPU devices on the machine.
GPU() ([]gpu.Device, error)
// Info returns all of the hardware statistics described above.
Info() (*Hardware, error)
}
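// Illustrative usage of the interface above (not part of the original file):
//
//	hw, err := hardware.New().Info()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("cores=%d memBytes=%d hasGPU=%v",
//		hw.LogicalCPUCount(), hw.TotalMemory(), hw.HasGPU())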
type hardwareInfo struct{}
func (*hardwareInfo) CPU() ([]cpu.Device, error) {
return cpu.GetCPUDevices()
}
func (h *hardwareInfo) Memory() (*mem.VirtualMemoryStat, error) {
return mem.VirtualMemory()
}
func (*hardwareInfo) GPU() ([]gpu.Device, error) {
return gpu.GetGPUDevices()
}
func (h *hardwareInfo) Info() (*Hardware, error) {
cpuInfo, err := h.CPU()
if err != nil {
return nil, err
}
memory, err := h.Memory()
if err != nil {
return nil, err
}
gpuInfo, err := h.GPU()
if err != nil {
if err != gpu.ErrUnsupportedPlatform {
return nil, err
}
gpuInfo = make([]gpu.Device, 0)
}
hardware := &Hardware{
CPU: cpuInfo,
Memory: memory,
GPU: gpuInfo,
}
return hardware, nil
}
// New constructs a new hardware info collector.
func New() HardwareInfo {
return &hardwareInfo{}
}
| 1 | 6,414 | No pb please | sonm-io-core | go |
@@ -0,0 +1,11 @@
+require 'migrate'
+class AddJoinTableBetweenUsersAndChangesets < ActiveRecord::Migration
+ def change
+ create_table :changesets_subscribers, id: false do |t|
+ t.column :subscriber_id, :bigint, null: false
+ t.column :changeset_id, :bigint, null: false
+ end
+ add_foreign_key :changesets_subscribers, [:subscriber_id], :users, [:id]
+ add_foreign_key :changesets_subscribers, [:changeset_id], :changesets, [:id]
+ end
+end | 1 | 1 | 9,296 | We need to add indexes here on both `subscriber_id` and `changeset_id` or things will quickly collapse as we build up subscribers ;-) What I would suggest is a unique index on `[:subscriber_id, :changeset_id]` which will also make duplicate entries impossible, and an ordinary index on `[:changeset_id]` for finding the subscribers to a changeset. | openstreetmap-openstreetmap-website | rb |
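# A follow-up migration along the lines of the review that follows (a sketch;
# the class name is ours, and the unique index also rules out duplicate rows):
class AddIndexesToChangesetsSubscribers < ActiveRecord::Migration
  def change
    add_index :changesets_subscribers, [:subscriber_id, :changeset_id], unique: true
    add_index :changesets_subscribers, [:changeset_id]
  end
end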
|
@@ -0,0 +1,13 @@
+package slackbot
+
+import (
+ "github.com/slack-go/slack"
+)
+
+func sendBotReply(client *slack.Client, channel, message string) error {
+ _, _, err := client.PostMessage(channel, slack.MsgOptionText(message, false))
+ if err != nil {
+ return err
+ }
+ return nil
+} | 1 | 1 | 10,850 | use the context versions of everything, e.g. `PostMessageContext` and thread it through the functions. will save you a bunch of refactoring trouble later on. | lyft-clutch | go |
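// A context-threaded variant of the helper above, as the review that follows
// suggests (a sketch; slack-go's PostMessageContext takes the same options):
//
//	func sendBotReply(ctx context.Context, client *slack.Client, channel, message string) error {
//		_, _, err := client.PostMessageContext(ctx, channel, slack.MsgOptionText(message, false))
//		return err
//	}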
|
@@ -64,9 +64,7 @@ def implements(
obj: "BaseChecker",
interface: Union[Type["Interface"], Tuple[Type["Interface"], ...]],
) -> bool:
- """Return whether the given object (maybe an instance or class) implements
- the interface.
- """
+ """Does the given object (maybe an instance or class) implements the interface."""
kimplements = getattr(obj, "__implements__", ())
if not isinstance(kimplements, (list, tuple)):
kimplements = (kimplements,) | 1 | # Copyright (c) 2009-2010, 2012-2013 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2014 Michal Nowikowski <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015-2018, 2020 Claudiu Popa <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2020-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2021 Nick Drozd <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Interfaces for Pylint objects"""
from collections import namedtuple
from typing import TYPE_CHECKING, Tuple, Type, Union
from astroid import nodes
if TYPE_CHECKING:
    from pylint.checkers import BaseChecker
    from pylint.reporters.ureports.nodes import Section
__all__ = (
    "IRawChecker",
    "IAstroidChecker",
    "ITokenChecker",
    "IReporter",
    "IChecker",
    "HIGH",
    "INFERENCE",
    "INFERENCE_FAILURE",
    "UNDEFINED",
    "CONFIDENCE_LEVELS",
)
Confidence = namedtuple("Confidence", ["name", "description"])
# Warning Certainties
HIGH = Confidence("HIGH", "Warning that is not based on inference result.")
INFERENCE = Confidence("INFERENCE", "Warning based on inference result.")
INFERENCE_FAILURE = Confidence(
"INFERENCE_FAILURE", "Warning based on inference with failures."
)
UNDEFINED = Confidence("UNDEFINED", "Warning without any associated confidence level.")
CONFIDENCE_LEVELS = [HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED]
class Interface:
    """Base class for interfaces."""

    @classmethod
    def is_implemented_by(cls, instance):
        return implements(instance, cls)
def implements(
    obj: "BaseChecker",
    interface: Union[Type["Interface"], Tuple[Type["Interface"], ...]],
) -> bool:
    """Return whether the given object (maybe an instance or class) implements
    the interface.
    """
    kimplements = getattr(obj, "__implements__", ())
    if not isinstance(kimplements, (list, tuple)):
        kimplements = (kimplements,)
    return any(issubclass(i, interface) for i in kimplements)
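# Illustrative usage (not part of the original module): a checker advertises
# the interfaces it implements through the ``__implements__`` attribute that
# this helper inspects, e.g.::
#
#     class MyChecker(BaseChecker):
#         __implements__ = (IAstroidChecker,)
#
#     implements(MyChecker, IAstroidChecker)  # -> True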
class IChecker(Interface):
    """This is a base interface, not designed to be used elsewhere than for
    sub interfaces definition.
    """

    def open(self):
        """Called before visiting a project (i.e. a set of modules)."""

    def close(self):
        """Called after visiting a project (i.e. a set of modules)."""


class IRawChecker(IChecker):
    """Interface for checkers which need to parse the raw file."""

    def process_module(self, node: nodes.Module) -> None:
        """Process a module.

        The module's content is accessible via ``astroid.stream``.
        """


class ITokenChecker(IChecker):
    """Interface for checkers that need access to the token list."""

    def process_tokens(self, tokens):
        """Process a module.

        tokens is a list of all source code tokens in the file.
        """


class IAstroidChecker(IChecker):
    """Interface for checkers which prefer to receive events according to
    statement type.
    """


class IReporter(Interface):
    """Reporters collect messages and display results encapsulated in a layout."""

    def handle_message(self, msg) -> None:
        """Handle the given message object."""

    def display_reports(self, layout: "Section") -> None:
        """Display results encapsulated in the layout tree."""
| 1 | 19,343 | "not to be used elsewhere other than" | PyCQA-pylint | py |
@@ -205,6 +205,7 @@ class SparkAppenderFactory implements FileAppenderFactory<InternalRow> {
case PARQUET:
return Parquet.writeDeletes(file.encryptingOutputFile())
.createWriterFunc(msgType -> SparkParquetWriters.buildWriter(lazyEqDeleteSparkType(), msgType))
+ .setAll(properties)
.overwrite()
.rowSchema(eqDeleteRowSchema)
.withSpec(spec) | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Map;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.avro.Avro;
import org.apache.iceberg.deletes.EqualityDeleteWriter;
import org.apache.iceberg.deletes.PositionDeleteWriter;
import org.apache.iceberg.encryption.EncryptedOutputFile;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.io.DataWriter;
import org.apache.iceberg.io.DeleteSchemaUtil;
import org.apache.iceberg.io.FileAppender;
import org.apache.iceberg.io.FileAppenderFactory;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.orc.ORC;
import org.apache.iceberg.parquet.Parquet;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.spark.SparkSchemaUtil;
import org.apache.iceberg.spark.data.SparkAvroWriter;
import org.apache.iceberg.spark.data.SparkOrcWriter;
import org.apache.iceberg.spark.data.SparkParquetWriters;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.unsafe.types.UTF8String;
class SparkAppenderFactory implements FileAppenderFactory<InternalRow> {
private final Map<String, String> properties;
private final Schema writeSchema;
private final StructType dsSchema;
private final PartitionSpec spec;
private final int[] equalityFieldIds;
private final Schema eqDeleteRowSchema;
private final Schema posDeleteRowSchema;
private StructType eqDeleteSparkType = null;
private StructType posDeleteSparkType = null;
SparkAppenderFactory(Map<String, String> properties, Schema writeSchema, StructType dsSchema, PartitionSpec spec,
int[] equalityFieldIds, Schema eqDeleteRowSchema, Schema posDeleteRowSchema) {
this.properties = properties;
this.writeSchema = writeSchema;
this.dsSchema = dsSchema;
this.spec = spec;
this.equalityFieldIds = equalityFieldIds;
this.eqDeleteRowSchema = eqDeleteRowSchema;
this.posDeleteRowSchema = posDeleteRowSchema;
}
static Builder builderFor(Table table, Schema writeSchema, StructType dsSchema) {
return new Builder(table, writeSchema, dsSchema);
}
static class Builder {
private final Table table;
private final Schema writeSchema;
private final StructType dsSchema;
private PartitionSpec spec;
private int[] equalityFieldIds;
private Schema eqDeleteRowSchema;
private Schema posDeleteRowSchema;
Builder(Table table, Schema writeSchema, StructType dsSchema) {
this.table = table;
this.spec = table.spec();
this.writeSchema = writeSchema;
this.dsSchema = dsSchema;
}
Builder spec(PartitionSpec newSpec) {
this.spec = newSpec;
return this;
}
Builder equalityFieldIds(int[] newEqualityFieldIds) {
this.equalityFieldIds = newEqualityFieldIds;
return this;
}
Builder eqDeleteRowSchema(Schema newEqDeleteRowSchema) {
this.eqDeleteRowSchema = newEqDeleteRowSchema;
return this;
}
Builder posDelRowSchema(Schema newPosDelRowSchema) {
this.posDeleteRowSchema = newPosDelRowSchema;
return this;
}
SparkAppenderFactory build() {
Preconditions.checkNotNull(table, "Table must not be null");
Preconditions.checkNotNull(writeSchema, "Write Schema must not be null");
Preconditions.checkNotNull(dsSchema, "DS Schema must not be null");
if (equalityFieldIds != null) {
Preconditions.checkNotNull(eqDeleteRowSchema, "Equality Field Ids and Equality Delete Row Schema" +
" must be set together");
}
if (eqDeleteRowSchema != null) {
Preconditions.checkNotNull(equalityFieldIds, "Equality Field Ids and Equality Delete Row Schema" +
" must be set together");
}
return new SparkAppenderFactory(table.properties(), writeSchema, dsSchema, spec, equalityFieldIds,
eqDeleteRowSchema, posDeleteRowSchema);
}
}
private StructType lazyEqDeleteSparkType() {
if (eqDeleteSparkType == null) {
Preconditions.checkNotNull(eqDeleteRowSchema, "Equality delete row schema shouldn't be null");
this.eqDeleteSparkType = SparkSchemaUtil.convert(eqDeleteRowSchema);
}
return eqDeleteSparkType;
}
private StructType lazyPosDeleteSparkType() {
if (posDeleteSparkType == null) {
Preconditions.checkNotNull(posDeleteRowSchema, "Position delete row schema shouldn't be null");
this.posDeleteSparkType = SparkSchemaUtil.convert(posDeleteRowSchema);
}
return posDeleteSparkType;
}
@Override
public FileAppender<InternalRow> newAppender(OutputFile file, FileFormat fileFormat) {
MetricsConfig metricsConfig = MetricsConfig.fromProperties(properties);
try {
switch (fileFormat) {
case PARQUET:
return Parquet.write(file)
.createWriterFunc(msgType -> SparkParquetWriters.buildWriter(dsSchema, msgType))
.setAll(properties)
.metricsConfig(metricsConfig)
.schema(writeSchema)
.overwrite()
.build();
case AVRO:
return Avro.write(file)
.createWriterFunc(ignored -> new SparkAvroWriter(dsSchema))
.setAll(properties)
.schema(writeSchema)
.overwrite()
.build();
case ORC:
return ORC.write(file)
.createWriterFunc(SparkOrcWriter::new)
.setAll(properties)
.metricsConfig(metricsConfig)
.schema(writeSchema)
.overwrite()
.build();
default:
throw new UnsupportedOperationException("Cannot write unknown format: " + fileFormat);
}
} catch (IOException e) {
throw new RuntimeIOException(e);
}
}
@Override
public DataWriter<InternalRow> newDataWriter(EncryptedOutputFile file, FileFormat format, StructLike partition) {
return new DataWriter<>(newAppender(file.encryptingOutputFile(), format), format,
file.encryptingOutputFile().location(), spec, partition, file.keyMetadata());
}
@Override
public EqualityDeleteWriter<InternalRow> newEqDeleteWriter(EncryptedOutputFile file, FileFormat format,
StructLike partition) {
Preconditions.checkState(equalityFieldIds != null && equalityFieldIds.length > 0,
"Equality field ids shouldn't be null or empty when creating equality-delete writer");
Preconditions.checkNotNull(eqDeleteRowSchema,
"Equality delete row schema shouldn't be null when creating equality-delete writer");
try {
switch (format) {
case PARQUET:
return Parquet.writeDeletes(file.encryptingOutputFile())
.createWriterFunc(msgType -> SparkParquetWriters.buildWriter(lazyEqDeleteSparkType(), msgType))
.overwrite()
.rowSchema(eqDeleteRowSchema)
.withSpec(spec)
.withPartition(partition)
.equalityFieldIds(equalityFieldIds)
.withKeyMetadata(file.keyMetadata())
.buildEqualityWriter();
case AVRO:
return Avro.writeDeletes(file.encryptingOutputFile())
.createWriterFunc(ignored -> new SparkAvroWriter(lazyEqDeleteSparkType()))
.overwrite()
.rowSchema(eqDeleteRowSchema)
.withSpec(spec)
.withPartition(partition)
.equalityFieldIds(equalityFieldIds)
.withKeyMetadata(file.keyMetadata())
.buildEqualityWriter();
default:
throw new UnsupportedOperationException(
"Cannot write equality-deletes for unsupported file format: " + format);
}
} catch (IOException e) {
throw new UncheckedIOException("Failed to create new equality delete writer", e);
}
}
@Override
public PositionDeleteWriter<InternalRow> newPosDeleteWriter(EncryptedOutputFile file, FileFormat format,
StructLike partition) {
try {
switch (format) {
case PARQUET:
StructType sparkPosDeleteSchema =
SparkSchemaUtil.convert(DeleteSchemaUtil.posDeleteSchema(posDeleteRowSchema));
return Parquet.writeDeletes(file.encryptingOutputFile())
.createWriterFunc(msgType -> SparkParquetWriters.buildWriter(sparkPosDeleteSchema, msgType))
.overwrite()
.rowSchema(posDeleteRowSchema)
.withSpec(spec)
.withPartition(partition)
.withKeyMetadata(file.keyMetadata())
.transformPaths(path -> UTF8String.fromString(path.toString()))
.buildPositionWriter();
case AVRO:
return Avro.writeDeletes(file.encryptingOutputFile())
.createWriterFunc(ignored -> new SparkAvroWriter(lazyPosDeleteSparkType()))
.overwrite()
.rowSchema(posDeleteRowSchema)
.withSpec(spec)
.withPartition(partition)
.withKeyMetadata(file.keyMetadata())
.buildPositionWriter();
default:
throw new UnsupportedOperationException(
"Cannot write pos-deletes for unsupported file format: " + format);
}
} catch (IOException e) {
throw new UncheckedIOException("Failed to create new position delete writer", e);
}
}
}
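// Mirroring the review that follows: the position-delete path can thread the
// table properties through exactly as the diff above does for the
// equality-delete writer (sketch of the Parquet case; other builder calls
// unchanged):
//
//   return Parquet.writeDeletes(file.encryptingOutputFile())
//       .createWriterFunc(msgType -> SparkParquetWriters.buildWriter(sparkPosDeleteSchema, msgType))
//       .setAll(properties)
//       .overwrite()
//       ...
//       .buildPositionWriter();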
| 1 | 39,563 | Thanks for the contribution, @coolderli ! I also think the newPosDeleteWriter need the properties setting ... | apache-iceberg | java |
@@ -0,0 +1,7 @@
+class AddIndexToSubscriptions < ActiveRecord::Migration
+ def change
+ add_index :subscriptions, :user_id
+ add_index :subscriptions, :team_id
+ add_index :subscriptions, [:plan_id, :plan_type]
+ end
+end | 1 | 1 | 8,088 | These additions seem unrelated to this change? | thoughtbot-upcase | rb |
|
@@ -126,4 +126,8 @@ public class TableProperties {
public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled";
public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false;
+
+ public static final String ENGINE_HIVE_ENABLED = "engine.hive.enabled";
+ public static final String ENGINE_HIVE_ENABLED_HIVE_CONF = "iceberg.engine.hive.enabled";
+ public static final boolean ENGINE_HIVE_ENABLED_DEFAULT = false;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
public class TableProperties {
private TableProperties() {
}
public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;
public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled";
public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true;
public static final String DEFAULT_FILE_FORMAT = "write.format.default";
public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
public static final String PARQUET_COMPRESSION_LEVEL = "write.parquet.compression-level";
public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null;
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
public static final String METADATA_SPLIT_SIZE = "read.split.metadata-target-size";
public static final long METADATA_SPLIT_SIZE_DEFAULT = 32 * 1024 * 1024; // 32 MB
public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
public static final int SPLIT_LOOKBACK_DEFAULT = 10;
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled";
public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size";
public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000;
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "data" folder underneath the root path of the table.
public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "metadata" folder underneath the root path of the table.
public static final String WRITE_METADATA_LOCATION = "write.metadata.path";
public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
public static final String METADATA_PREVIOUS_VERSIONS_MAX = "write.metadata.previous-versions-max";
public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT = 100;
// This enables to delete the oldest metadata file after commit.
public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled";
public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false;
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default";
public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled";
public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false";
public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes";
public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = Long.MAX_VALUE;
public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled";
public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false;
}
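// A sketch of the relocation suggested in the review that follows: the Hive
// engine flag is a Hadoop/Hive configuration key rather than a table
// property, so it could live in a dedicated class (class name taken from the
// review; contents illustrative):
//
//   package org.apache.iceberg.hadoop;
//
//   public class ConfigProperties {
//     private ConfigProperties() {
//     }
//
//     public static final String ENGINE_HIVE_ENABLED = "iceberg.engine.hive.enabled";
//   }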
| 1 | 25,199 | Could we move this to a class for Hadoop configuration properties, like `org.apache.iceberg.hadoop.ConfigProperties`? | apache-iceberg | java |
@@ -95,19 +95,6 @@ func (r *ChaosCollector) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
manageFlag = true
}
- if obj.IsDeleted() {
- if !manageFlag {
- if err = r.archiveExperiment(req.Namespace, req.Name); err != nil {
- r.Log.Error(err, "failed to archive experiment")
- }
- } else {
- if err = r.event.DeleteByUID(ctx, string(chaosMeta.GetUID())); err != nil {
- r.Log.Error(err, "failed to delete experiment related events")
- }
- }
- return ctrl.Result{}, nil
- }
-
if err := r.setUnarchivedExperiment(req, obj); err != nil {
r.Log.Error(err, "failed to archive experiment")
// ignore error here | 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package collector
import (
"context"
"encoding/json"
"errors"
"github.com/go-logr/logr"
"github.com/jinzhu/gorm"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/pkg/dashboard/core"
)
// ChaosCollector represents a collector for Chaos Object.
type ChaosCollector struct {
client.Client
Log logr.Logger
apiType runtime.Object
archive core.ExperimentStore
event core.EventStore
}
// Reconcile syncs a chaos object's state into the collector's archive and event stores.
func (r *ChaosCollector) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var (
chaosMeta metav1.Object
ok bool
manageFlag bool
)
if r.apiType == nil {
r.Log.Error(nil, "apiType has not been initialized")
return ctrl.Result{}, nil
}
manageFlag = false
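// manageFlag tracks whether the object carries v1alpha1.LabelManagedBy, i.e.
// it is owned by some other resource; as the branches below show, managed
// objects only have their recorded events deleted, while standalone
// experiments are archived.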
obj, ok := r.apiType.DeepCopyObject().(v1alpha1.InnerObject)
if !ok {
r.Log.Error(nil, "it's not a stateful object")
return ctrl.Result{}, nil
}
err := r.Get(ctx, req.NamespacedName, obj)
if apierrors.IsNotFound(err) {
if chaosMeta, ok = obj.(metav1.Object); !ok {
r.Log.Error(nil, "failed to get chaos meta information")
}
if chaosMeta.GetLabels()[v1alpha1.LabelManagedBy] != "" {
manageFlag = true
}
if !manageFlag {
if err = r.archiveExperiment(req.Namespace, req.Name); err != nil {
r.Log.Error(err, "failed to archive experiment")
}
} else {
if err = r.event.DeleteByUID(ctx, string(chaosMeta.GetUID())); err != nil {
r.Log.Error(err, "failed to delete experiment related events")
}
}
return ctrl.Result{}, nil
}
if err != nil {
r.Log.Error(err, "failed to get chaos object", "request", req.NamespacedName)
return ctrl.Result{}, nil
}
if chaosMeta, ok = obj.(metav1.Object); !ok {
r.Log.Error(nil, "failed to get chaos meta information")
}
if chaosMeta.GetLabels()[v1alpha1.LabelManagedBy] != "" {
manageFlag = true
}
if obj.IsDeleted() {
if !manageFlag {
if err = r.archiveExperiment(req.Namespace, req.Name); err != nil {
r.Log.Error(err, "failed to archive experiment")
}
} else {
if err = r.event.DeleteByUID(ctx, string(chaosMeta.GetUID())); err != nil {
r.Log.Error(err, "failed to delete experiment related events")
}
}
return ctrl.Result{}, nil
}
if err := r.setUnarchivedExperiment(req, obj); err != nil {
r.Log.Error(err, "failed to archive experiment")
// ignore error here
}
return ctrl.Result{}, nil
}
// Setup setups collectors by Manager.
func (r *ChaosCollector) Setup(mgr ctrl.Manager, apiType client.Object) error {
r.apiType = apiType
return ctrl.NewControllerManagedBy(mgr).
For(apiType).
Complete(r)
}
func (r *ChaosCollector) setUnarchivedExperiment(req ctrl.Request, obj v1alpha1.InnerObject) error {
var (
chaosMeta metav1.Object
ok bool
)
if chaosMeta, ok = obj.(metav1.Object); !ok {
r.Log.Error(nil, "failed to get chaos meta information")
}
UID := string(chaosMeta.GetUID())
archive := &core.Experiment{
ExperimentMeta: core.ExperimentMeta{
Namespace: req.Namespace,
Name: req.Name,
Kind: obj.GetObjectKind().GroupVersionKind().Kind,
UID: UID,
Archived: false,
},
}
switch chaos := obj.(type) {
case *v1alpha1.PodChaos:
archive.Action = string(chaos.Spec.Action)
case *v1alpha1.NetworkChaos:
archive.Action = string(chaos.Spec.Action)
case *v1alpha1.IOChaos:
archive.Action = string(chaos.Spec.Action)
case *v1alpha1.TimeChaos, *v1alpha1.KernelChaos, *v1alpha1.StressChaos, *v1alpha1.HTTPChaos:
archive.Action = ""
case *v1alpha1.DNSChaos:
archive.Action = string(chaos.Spec.Action)
case *v1alpha1.PhysicalMachineChaos:
archive.Action = string(chaos.Spec.Action)
case *v1alpha1.AWSChaos:
archive.Action = string(chaos.Spec.Action)
case *v1alpha1.GCPChaos:
archive.Action = string(chaos.Spec.Action)
case *v1alpha1.JVMChaos:
archive.Action = string(chaos.Spec.Action)
default:
return errors.New("unsupported chaos type " + archive.Kind)
}
archive.StartTime = chaosMeta.GetCreationTimestamp().Time
if chaosMeta.GetDeletionTimestamp() != nil {
archive.FinishTime = chaosMeta.GetDeletionTimestamp().Time
}
data, err := json.Marshal(chaosMeta)
if err != nil {
r.Log.Error(err, "failed to marshal chaos", "kind", archive.Kind,
"namespace", archive.Namespace, "name", archive.Name)
return err
}
archive.Experiment = string(data)
find, err := r.archive.FindByUID(context.Background(), UID)
if err != nil && !gorm.IsRecordNotFoundError(err) {
r.Log.Error(err, "failed to find experiment", "UID", UID)
return err
}
if find != nil {
archive.ID = find.ID
archive.CreatedAt = find.CreatedAt
archive.UpdatedAt = find.UpdatedAt
}
if err := r.archive.Set(context.Background(), archive); err != nil {
r.Log.Error(err, "failed to update experiment", "archive", archive)
return err
}
return nil
}
func (r *ChaosCollector) archiveExperiment(ns, name string) error {
if err := r.archive.Archive(context.Background(), ns, name); err != nil {
r.Log.Error(err, "failed to archive experiment", "namespace", ns, "name", name)
return err
}
return nil
}
| 1 | 26,262 | The related statements should also be deleted. | chaos-mesh-chaos-mesh | go |
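(Judging from the full file above, the "related statements" are most likely the `chaosMeta`/`manageFlag` extraction sitting just before the removed `obj.IsDeleted()` branch: once that branch is gone, those assignments have no remaining users in `Reconcile`. This is an inference from the visible code, not something the diff itself states.)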
@@ -455,6 +455,10 @@ class ELBConnection(AWSQueryConnection):
'LoadBalancerName' : lb_name,
'LoadBalancerPort' : lb_port,
}
+ if policies:
+ self.build_list_params(params, policies, 'PolicyNames.member.%d')
+ else:
+ params["PolicyNames"] = ""
self.build_list_params(params, policies, 'PolicyNames.member.%d')
return self.get_status('SetLoadBalancerPoliciesOfListener', params)
| 1 | # Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
load balancing service from AWS.
"""
from boto.connection import AWSQueryConnection
from boto.ec2.instanceinfo import InstanceInfo
from boto.ec2.elb.loadbalancer import LoadBalancer
from boto.ec2.elb.instancestate import InstanceState
from boto.ec2.elb.healthcheck import HealthCheck
from boto.regioninfo import RegionInfo
import boto
RegionData = {
'us-east-1' : 'elasticloadbalancing.us-east-1.amazonaws.com',
'us-west-1' : 'elasticloadbalancing.us-west-1.amazonaws.com',
'us-west-2' : 'elasticloadbalancing.us-west-2.amazonaws.com',
'sa-east-1' : 'elasticloadbalancing.sa-east-1.amazonaws.com',
'eu-west-1' : 'elasticloadbalancing.eu-west-1.amazonaws.com',
'ap-northeast-1' : 'elasticloadbalancing.ap-northeast-1.amazonaws.com',
'ap-southeast-1' : 'elasticloadbalancing.ap-southeast-1.amazonaws.com'}
def regions():
"""
Get all available regions for the ELB service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=ELBConnection)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.elb.ELBConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.ELBConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
class ELBConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'elb_version', '2011-11-15')
DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'elb_region_endpoint',
'elasticloadbalancing.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=False, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/'):
"""
Init method to create a new connection to EC2 Load Balancing Service.
.. note:: The region argument is overridden by the region specified in
the boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path)
def _required_auth_capability(self):
return ['ec2']
def build_list_params(self, params, items, label):
if isinstance(items, str):
items = [items]
for index, item in enumerate(items):
params[label % (index + 1)] = item
def get_all_load_balancers(self, load_balancer_names=None):
"""
Retrieve all load balancers associated with your account.
:type load_balancer_names: list
:keyword load_balancer_names: An optional list of load balancer names.
:rtype: :py:class:`boto.resultset.ResultSet`
:return: A ResultSet containing instances of
:class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {}
if load_balancer_names:
self.build_list_params(params, load_balancer_names,
'LoadBalancerNames.member.%d')
return self.get_list('DescribeLoadBalancers', params,
[('member', LoadBalancer)])
def create_load_balancer(self, name, zones, listeners, subnets=None,
security_groups=None):
"""
Create a new load balancer for your account. By default the load
balancer will be created in EC2. To create a load balancer inside a
VPC, parameter zones must be set to None and subnets must not be None.
The load balancer will be automatically created under the VPC that
contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
:type zones: List of strings
:param zones: The names of the availability zone(s) to add.
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber,
Protocol, [SSLCertificateId])
where LoadBalancerPortNumber and InstancePortNumber
are integer values between 1 and 65535, Protocol is a
string containing either 'TCP', 'HTTP' or 'HTTPS';
SSLCertificateID is the ARN of a AWS AIM certificate,
and must be specified when doing HTTPS.
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {'LoadBalancerName' : name}
for index, listener in enumerate(listeners):
i = index + 1
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if listener[2]=='HTTPS':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
if zones:
self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
if subnets:
self.build_list_params(params, subnets, 'Subnets.member.%d')
if security_groups:
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
load_balancer = self.get_object('CreateLoadBalancer',
params, LoadBalancer)
load_balancer.name = name
load_balancer.listeners = listeners
load_balancer.availability_zones = zones
load_balancer.subnets = subnets
load_balancer.security_groups = security_groups
return load_balancer
def create_load_balancer_listeners(self, name, listeners):
"""
Creates a Listener (or group of listeners) for an existing Load Balancer
:type name: string
:param name: The name of the load balancer to create the listeners for
:type listeners: List of tuples
:param listeners: Each tuple contains three values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
[SSLCertificateId])
where LoadBalancerPortNumber and InstancePortNumber are
integer values between 1 and 65535, Protocol is a
string containing either 'TCP', 'HTTP' or 'HTTPS';
SSLCertificateID is the ARN of a AWS AIM certificate,
and must be specified when doing HTTPS.
:return: The status of the request
"""
params = {'LoadBalancerName' : name}
for index, listener in enumerate(listeners):
i = index + 1
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if listener[2]=='HTTPS':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
return self.get_status('CreateLoadBalancerListeners', params)
def delete_load_balancer(self, name):
"""
Delete a Load Balancer from your account.
:type name: string
:param name: The name of the Load Balancer to delete
"""
params = {'LoadBalancerName': name}
return self.get_status('DeleteLoadBalancer', params)
def delete_load_balancer_listeners(self, name, ports):
"""
Deletes a load balancer listener (or group of listeners)
:type name: string
:param name: The name of the load balancer to create the listeners for
:type ports: List int
:param ports: Each int represents the port on the ELB to be removed
:return: The status of the request
"""
params = {'LoadBalancerName' : name}
for index, port in enumerate(ports):
params['LoadBalancerPorts.member.%d' % (index + 1)] = port
return self.get_status('DeleteLoadBalancerListeners', params)
def enable_availability_zones(self, load_balancer_name, zones_to_add):
"""
Add availability zones to an existing Load Balancer
All zones must be in the same region as the Load Balancer
Adding zones that are already registered with the Load Balancer
has no effect.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to add.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName' : load_balancer_name}
self.build_list_params(params, zones_to_add,
'AvailabilityZones.member.%d')
return self.get_list('EnableAvailabilityZonesForLoadBalancer',
params, None)
def disable_availability_zones(self, load_balancer_name, zones_to_remove):
"""
Remove availability zones from an existing Load Balancer.
All zones must be in the same region as the Load Balancer.
Removing zones that are not registered with the Load Balancer
has no effect.
You cannot remove all zones from an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to remove.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName' : load_balancer_name}
self.build_list_params(params, zones_to_remove,
'AvailabilityZones.member.%d')
return self.get_list('DisableAvailabilityZonesForLoadBalancer',
params, None)
def register_instances(self, load_balancer_name, instances):
"""
Add new Instances to an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to add.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName' : load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('RegisterInstancesWithLoadBalancer',
params, [('member', InstanceInfo)])
def deregister_instances(self, load_balancer_name, instances):
"""
Remove Instances from an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to remove.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName' : load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DeregisterInstancesFromLoadBalancer',
params, [('member', InstanceInfo)])
def describe_instance_health(self, load_balancer_name, instances=None):
"""
Get current state of all Instances registered to an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances
to return status for. If not provided,
the state of all instances will be returned.
:rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
:return: list of state info for instances in this Load Balancer.
"""
params = {'LoadBalancerName' : load_balancer_name}
if instances:
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DescribeInstanceHealth', params,
[('member', InstanceState)])
def configure_health_check(self, name, health_check):
"""
Define a health check for the EndPoints.
:type name: string
:param name: The mnemonic name associated with the load balancer
:type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:param health_check: A HealthCheck object populated with the desired
values.
:rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck`
"""
params = {'LoadBalancerName' : name,
'HealthCheck.Timeout' : health_check.timeout,
'HealthCheck.Target' : health_check.target,
'HealthCheck.Interval' : health_check.interval,
'HealthCheck.UnhealthyThreshold' : health_check.unhealthy_threshold,
'HealthCheck.HealthyThreshold' : health_check.healthy_threshold}
return self.get_object('ConfigureHealthCheck', params, HealthCheck)
def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
ssl_certificate_id):
"""
Sets the certificate that terminates the specified listener's SSL
connections. The specified certificate replaces any prior certificate
that was used on the same LoadBalancer and port.
"""
params = {
'LoadBalancerName' : lb_name,
'LoadBalancerPort' : lb_port,
'SSLCertificateId' : ssl_certificate_id,
}
return self.get_status('SetLoadBalancerListenerSSLCertificate', params)
def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes that follow
that of an application-generated cookie. This policy can only be
associated with HTTP listeners.
This policy is similar to the policy created by
CreateLBCookieStickinessPolicy, except that the lifetime of the special
Elastic Load Balancing cookie follows the lifetime of the
application-generated cookie specified in the policy configuration. The
load balancer only inserts a new stickiness cookie when the application
response includes a new application cookie.
If the application cookie is explicitly removed or expires, the session
stops being sticky until a new application cookie is issued.
"""
params = {
'CookieName' : name,
'LoadBalancerName' : lb_name,
'PolicyName' : policy_name,
}
return self.get_status('CreateAppCookieStickinessPolicy', params)
def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes controlled
by the lifetime of the browser (user-agent) or a specified expiration
period. This policy can be associated only with HTTP listeners.
When a load balancer implements this policy, the load balancer uses a
special cookie to track the backend server instance for each request.
When the load balancer receives a request, it first checks to see if
this cookie is present in the request. If so, the load balancer sends
the request to the application server specified in the cookie. If not,
the load balancer sends the request to a server that is chosen based on
the existing load balancing algorithm.
A cookie is inserted into the response for binding subsequent requests
from the same user to that server. The validity of the cookie is based
on the cookie expiration time, which is specified in the policy
configuration.
"""
params = {
'CookieExpirationPeriod' : cookie_expiration_period,
'LoadBalancerName' : lb_name,
'PolicyName' : policy_name,
}
return self.get_status('CreateLBCookieStickinessPolicy', params)
def delete_lb_policy(self, lb_name, policy_name):
"""
Deletes a policy from the LoadBalancer. The specified policy must not
be enabled for any listeners.
"""
params = {
'LoadBalancerName' : lb_name,
'PolicyName' : policy_name,
}
return self.get_status('DeleteLoadBalancerPolicy', params)
def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
"""
Associates, updates, or disables a policy with a listener on the load
balancer. Currently only zero (0) or one (1) policy can be associated
with a listener.
"""
params = {
'LoadBalancerName' : lb_name,
'LoadBalancerPort' : lb_port,
}
self.build_list_params(params, policies, 'PolicyNames.member.%d')
return self.get_status('SetLoadBalancerPoliciesOfListener', params)
def apply_security_groups_to_lb(self, name, security_groups):
"""
Applies security groups to the load balancer.
Applying security groups that are already registered with the
Load Balancer has no effect.
:type name: string
:param name: The name of the Load Balancer
:type security_groups: List of strings
:param security_groups: The name of the security group(s) to add.
:rtype: List of strings
:return: An updated list of security groups for this Load Balancer.
"""
params = {'LoadBalancerName' : name}
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
return self.get_list('ApplySecurityGroupsToLoadBalancer',
params,
None)
def attach_lb_to_subnets(self, name, subnets):
"""
Attaches load balancer to one or more subnets.
Attaching subnets that are already registered with the
Load Balancer has no effect.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to add.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName' : name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('AttachLoadBalancerToSubnets',
params,
None)
def detach_lb_from_subnets(self, name, subnets):
"""
Detaches load balancer from one or more subnets.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to detach.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName' : name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('DettachLoadBalancerFromSubnets',
params,
None)
| 1 | 8,228 | Shouldn't this line be removed? | boto-boto | py |
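(The line in question is the second, unconditional `self.build_list_params(params, policies, 'PolicyNames.member.%d')` call left below the new if/else. When `policies` is non-empty it is merely redundant — it re-sets the same `PolicyNames.member.N` keys the if-branch already populated — and when `policies` is None it would raise a `TypeError` from `enumerate(None)`, defeating the purpose of the new else-branch.)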
@@ -0,0 +1,7 @@
+bad_names = set([
+ 'TEAM',
+ 'PUBLIC'
+])
+
+def blacklisted_name(username):
+ return username in bad_names | 1 | 1 | 16,536 | Just move this into `const.py`. It already has similar stuff. Also, make it uppercase since it's a const. | quiltdata-quilt | py |
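(Concretely, the suggestion amounts to an uppercase module-level constant defined alongside the others in `const.py`, e.g. `BLACKLISTED_USERNAMES = {'TEAM', 'PUBLIC'}`; the exact name is an assumption — only the target module and the uppercase convention come from the comment.)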
|
@@ -294,6 +294,7 @@ public class PasscodeActivity extends Activity {
launchBiometricAuth();
} else {
setMode(PasscodeMode.Check);
+ newMode = PasscodeMode.Check;
}
break;
} | 1 | /*
* Copyright (c) 2011-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.ui;
import android.Manifest;
import android.Manifest.permission;
import android.annotation.TargetApi;
import android.app.Activity;
import android.content.Context;
import android.content.DialogInterface;
import android.content.pm.PackageManager;
import android.hardware.biometrics.BiometricPrompt;
import android.hardware.fingerprint.FingerprintManager;
import android.os.Build.VERSION;
import android.os.Build.VERSION_CODES;
import android.os.Bundle;
import android.os.CancellationSignal;
import android.text.Editable;
import android.text.TextWatcher;
import android.view.KeyEvent;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.WindowManager;
import android.view.accessibility.AccessibilityEvent;
import android.view.accessibility.AccessibilityManager;
import android.view.inputmethod.InputMethodManager;
import android.widget.Button;
import android.widget.LinearLayout;
import android.widget.TextView;
import com.salesforce.androidsdk.R;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.accounts.UserAccountManager;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.security.PasscodeManager;
import java.util.List;
/**
* Passcode activity: takes care of creating/verifying a user passcode.
*/
public class PasscodeActivity extends Activity {
private static final String EXTRA_KEY = "input_text";
protected static final int MAX_PASSCODE_ATTEMPTS = 10;
final private int REQUEST_CODE_ASK_PERMISSIONS = 11;
private PasscodeMode currentMode;
private TextView title, instr, bioInstrTitle, bioInstr;
private PasscodeField passcodeField;
private LinearLayout passcodeBox, biometricBox;
private Button logoutButton, notNowButton, enableButton, verifyButton;
private View fingerImage;
private PasscodeManager passcodeManager;
private String firstPasscode;
private boolean logoutEnabled;
private boolean forceBiometric;
public enum PasscodeMode {
Create,
CreateConfirm,
Check,
Change,
EnableBiometric,
BiometricCheck
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
passcodeManager = SalesforceSDKManager.getInstance().getPasscodeManager();
// Protect against screenshots.
getWindow().setFlags(WindowManager.LayoutParams.FLAG_SECURE,
WindowManager.LayoutParams.FLAG_SECURE);
setContentView(R.layout.sf__passcode);
title = findViewById(R.id.sf__passcode_title);
instr = findViewById(R.id.sf__passcode_instructions);
passcodeField = findViewById(R.id.sf__passcode_text);
passcodeField.addTextChangedListener(new TextWatcher() {
@Override
public void beforeTextChanged(CharSequence s, int start, int count, int after) { }
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
final String passcode = s.toString();
if (passcodeManager.getPasscodeLengthKnown() && passcode.length() == passcodeManager.getPasscodeLength()) {
onSubmit(passcode);
}
}
@Override
public void afterTextChanged(Editable s) { }
});
if (passcodeManager.getPasscodeLengthKnown()) {
passcodeField.setHint(getString(R.string.sf__accessibility_passcode_length_hint, passcodeManager.getPasscodeLength()));
}
passcodeBox = findViewById(R.id.sf__passcode_box);
logoutButton = findViewById(R.id.sf__passcode_logout_button);
logoutButton.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
signoutAllUsers();
}
});
verifyButton = findViewById(R.id.sf__passcode_verify_button);
verifyButton.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
Editable passcode = passcodeField.getText();
if (passcode != null) {
onSubmit(passcode.toString());
}
}
});
fingerImage = findViewById(R.id.sf__fingerprint_icon);
bioInstrTitle = findViewById(R.id.sf__biometric_instructions_title);
passcodeField.announceForAccessibility(bioInstrTitle.getText());
bioInstr = findViewById(R.id.sf__biometric_instructions);
bioInstr.setText(getString(R.string.sf__biometric_allow_instructions, SalesforceSDKManager.getInstance().provideAppName()));
biometricBox = findViewById(R.id.sf__biometric_box);
notNowButton = findViewById(R.id.sf__biometric_not_now_button);
notNowButton.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
biometricDeclined();
}
});
enableButton = findViewById(R.id.sf__biometric_enable_button);
enableButton.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
launchBiometricAuth();
}
});
clearUi();
// Asking passcode manager if a change passcode flow is required
if (passcodeManager.isPasscodeChangeRequired()) {
setMode(PasscodeMode.Change);
} else {
if (passcodeManager.hasStoredPasscode(this)) {
PasscodeMode mode = passcodeManager.biometricEnabled() ? PasscodeMode.BiometricCheck : PasscodeMode.Check;
setMode(mode);
} else {
setMode(PasscodeMode.Create);
}
}
logoutEnabled = true;
forceBiometric = false;
if (savedInstanceState != null) {
final String inputText = savedInstanceState.getString(EXTRA_KEY);
if (passcodeField != null && inputText != null) {
passcodeField.setText(inputText.trim());
}
}
}
protected void biometricDeclined() {
if (passcodeManager.biometricEnabled()) {
setMode(PasscodeMode.Check);
} else {
passcodeManager.setBiometricEnabled(PasscodeActivity.this, false);
passcodeManager.unlock();
done();
}
}
@Override
public boolean onKeyDown(int keyCode, KeyEvent event) {
if (keyCode == KeyEvent.KEYCODE_BACK) {
moveTaskToBack(true);
return true;
}
return super.onKeyDown(keyCode, event);
}
/**
* Saves the entered text before activity rotation.
*/
@Override
protected void onSaveInstanceState(Bundle savedInstance) {
if (passcodeField != null && passcodeField.getText() != null) {
savedInstance.putString(EXTRA_KEY, passcodeField.getText().toString());
}
}
public PasscodeMode getMode() {
return currentMode;
}
public void setMode(PasscodeMode newMode) {
if (newMode == currentMode) return;
if (newMode == PasscodeMode.EnableBiometric && !canShowBiometric()) {
return;
}
clearUi();
switch(newMode) {
case Check:
title.setText(getString(R.string.sf__passcode_enter_title));
title.setVisibility(View.VISIBLE);
instr.setText(getString(R.string.sf__passcode_enter_instructions));
instr.setVisibility(View.VISIBLE);
passcodeBox.setVisibility(View.VISIBLE);
passcodeField.setVisibility(View.VISIBLE);
if (!passcodeManager.getPasscodeLengthKnown()) {
verifyButton.setVisibility(View.VISIBLE);
}
showKeyboard();
sendAccessibilityEvent(instr.getText().toString());
break;
case Create:
title.setText(getString(R.string.sf__passcode_create_title));
title.setVisibility(View.VISIBLE);
// Check if passcodes did not match
int instructionText = (currentMode == PasscodeMode.CreateConfirm) ? R.string.sf__passcodes_dont_match
: R.string.sf__passcode_create_instructions;
instr.setText(getString(instructionText));
instr.setVisibility(View.VISIBLE);
passcodeBox.setVisibility(View.VISIBLE);
passcodeField.setVisibility(View.VISIBLE);
passcodeField.requestFocus();
showKeyboard();
sendAccessibilityEvent(instr.getText().toString());
break;
case CreateConfirm:
title.setText(getString(R.string.sf__passcode_confirm_title));
title.setVisibility(View.VISIBLE);
instr.setText(getString(R.string.sf__passcode_confirm_instructions));
instr.setVisibility(View.VISIBLE);
passcodeBox.setVisibility(View.VISIBLE);
passcodeField.setVisibility(View.VISIBLE);
passcodeField.requestFocus();
showKeyboard();
sendAccessibilityEvent(instr.getText().toString());
break;
case Change:
title.setText(getString(R.string.sf__passcode_change_title));
title.setVisibility(View.VISIBLE);
instr.setText(getString(R.string.sf__passcode_change_instructions));
instr.setVisibility(View.VISIBLE);
passcodeBox.setVisibility(View.VISIBLE);
passcodeField.setVisibility(View.VISIBLE);
passcodeField.requestFocus();
showKeyboard();
sendAccessibilityEvent(instr.getText().toString());
break;
case EnableBiometric:
hideKeyboard();
title.setText(getString(R.string.sf__biometric_title));
title.setVisibility(View.VISIBLE);
biometricBox.setVisibility(View.VISIBLE);
bioInstrTitle.setVisibility(View.VISIBLE);
sendAccessibilityEvent(bioInstrTitle.getText().toString());
bioInstr.setVisibility(View.VISIBLE);
notNowButton.setVisibility(View.VISIBLE);
enableButton.setVisibility(View.VISIBLE);
fingerImage.setVisibility(View.VISIBLE);
passcodeManager.setBiometricEnrollmentShown(this, true);
break;
case BiometricCheck:
hideKeyboard();
if (canShowBiometric()) {
launchBiometricAuth();
} else {
setMode(PasscodeMode.Check);
}
break;
}
passcodeField.setText("");
currentMode = newMode;
}
/**
* Used from tests to allow/disallow automatic logout when wrong passcode has been entered too many times.
*
* @param b True - if logout is enabled, False - otherwise.
*/
public void enableLogout(boolean b) {
logoutEnabled = b;
}
/**
* Used for tests to allow biometric when the device is not set up
*
* @param b True - if biometric checks skipped, False - otherwise.
*/
public void forceBiometric(boolean b) {
forceBiometric = b;
}
protected boolean onSubmit(String enteredPasscode) {
boolean showBiometricEnrollment = !passcodeManager.biometricEnabled() &&
!passcodeManager.biometricEnrollmentShown() &&
passcodeManager.biometricAllowed() &&
canShowBiometric();
switch (getMode()) {
case Create:
firstPasscode = enteredPasscode;
setMode(PasscodeMode.CreateConfirm);
return true;
case CreateConfirm:
if (enteredPasscode.equals(firstPasscode)) {
passcodeManager.store(this, enteredPasscode);
if (showBiometricEnrollment) {
setMode(PasscodeMode.EnableBiometric);
} else {
passcodeManager.unlock();
done();
}
} else {
setMode(PasscodeMode.Create);
}
return true;
case Check:
if (passcodeManager.check(this, enteredPasscode)) {
sendAccessibilityEvent(getString(R.string.sf__accessibility_unlock_announcement));
if (!passcodeManager.getPasscodeLengthKnown()) {
passcodeManager.setPasscodeLength(this, enteredPasscode.length());
}
if (showBiometricEnrollment) {
setMode(PasscodeMode.EnableBiometric);
} else {
passcodeManager.unlock();
done();
}
} else {
logoutButton.setVisibility(View.VISIBLE);
int attempts = passcodeManager.addFailedPasscodeAttempt();
passcodeField.setText("");
int maxAttempts = getMaxPasscodeAttempts();
if (attempts < maxAttempts - 1) {
instr.setText(getString(R.string.sf__passcode_try_again, (maxAttempts - attempts)));
sendAccessibilityEvent(instr.getText().toString());
} else if (attempts < maxAttempts) {
instr.setText(getString(R.string.sf__passcode_final));
sendAccessibilityEvent(instr.getText().toString());
} else {
signoutAllUsers();
}
}
return true;
case Change:
firstPasscode = enteredPasscode;
setMode(PasscodeMode.CreateConfirm);
return true;
}
return false;
}
protected void done() {
setResult(RESULT_OK);
finish();
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected int getLayoutId() {
return R.layout.sf__passcode;
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected TextView getTitleView() {
return (TextView) findViewById(R.id.sf__passcode_title);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected TextView getInstructionsView() {
return (TextView) findViewById(R.id.sf__passcode_instructions);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getCreateTitle() {
return getString(R.string.sf__passcode_create_title);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getEnterTitle() {
return getString(R.string.sf__passcode_enter_title);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getConfirmTitle() {
return getString(R.string.sf__passcode_confirm_title);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getEnterInstructions() {
return getString(R.string.sf__passcode_enter_instructions);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getCreateInstructions() {
return getString(R.string.sf__passcode_create_instructions);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getChangeInstructions() {
return getString(R.string.sf__passcode_change_instructions);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getConfirmInstructions() {
return getString(R.string.sf__passcode_confirm_instructions);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getPasscodeTryAgainError(int countAttemptsLeft) {
return getString(R.string.sf__passcode_try_again, countAttemptsLeft);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getPasscodeFinalAttemptError() {
return getString(R.string.sf__passcode_final);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected String getPasscodesDontMatchError() {
return getString(R.string.sf__passcodes_dont_match);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected Button getLogoutButton() {
return findViewById(R.id.sf__passcode_logout_button);
}
/**
* @deprecated Will be removed in Mobile SDK 8.0. Override in XML instead.
*/
protected Button getVerifyButton() {
return findViewById(R.id.sf__passcode_verify_button);
}
/**
* @return maximum number of passcode attempts
*/
protected int getMaxPasscodeAttempts() {
return MAX_PASSCODE_ATTEMPTS;
}
private void signoutAllUsers() {
passcodeManager.reset(this);
sendAccessibilityEvent(getString(R.string.sf__accessibility_logged_out_announcement));
// Used for tests
if (!logoutEnabled) {
return;
}
final UserAccountManager userAccMgr = SalesforceSDKManager.getInstance().getUserAccountManager();
final List<UserAccount> userAccounts = userAccMgr.getAuthenticatedUsers();
/*
* If the user forgot his/her passcode, we log all the authenticated
* users out. All the existing accounts except the last account
* are removed without dismissing the PasscodeActivity. The last
* account is removed, after which the PasscodeActivity is dismissed,
* and the login page is brought up at this point.
*/
if (userAccounts != null) {
int numAccounts = userAccounts.size();
if (numAccounts > 0) {
for (int i = 0; i < numAccounts - 1; i++) {
final UserAccount account = userAccounts.get(i);
userAccMgr.signoutUser(account, null, false);
}
final UserAccount lastAccount = userAccounts.get(numAccounts - 1);
userAccMgr.signoutUser(lastAccount, PasscodeActivity.this);
}
} else {
userAccMgr.signoutCurrentUser(PasscodeActivity.this);
}
}
/**
* Displays the fingerprint dialog. This can be overridden to provide
* a custom fingerprint auth layout if the app chooses to do so.
*/
protected void showFingerprintDialog() {
final FingerprintAuthDialogFragment fingerprintAuthDialog = new FingerprintAuthDialogFragment();
fingerprintAuthDialog.setContext(this);
fingerprintAuthDialog.show(getFragmentManager(), "fingerprintDialog");
}
/**
* Displays the dialog provided by the OS for biometric authentication
* using {@link BiometricPrompt}.
*/
@TargetApi(VERSION_CODES.P)
protected void showBiometricDialog() {
/*
* TODO: Remove this check once minAPI >= 28.
*/
if (VERSION.SDK_INT >= VERSION_CODES.P) {
final BiometricPrompt.Builder bioBuilder = new BiometricPrompt.Builder(this);
bioBuilder.setDescription(getString(R.string.sf__fingerprint_description, SalesforceSDKManager.getInstance().provideAppName()));
bioBuilder.setTitle(getString(R.string.sf__fingerprint_title));
bioBuilder.setNegativeButton(getString(R.string.sf__fingerprint_cancel), getMainExecutor(),
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
biometricDeclined();
}
});
final BiometricPrompt bioPrompt = bioBuilder.build();
bioPrompt.authenticate(new CancellationSignal(), getMainExecutor(),
new BiometricPrompt.AuthenticationCallback() {
@Override
public void onAuthenticationError(int errorCode, CharSequence errString) {
super.onAuthenticationError(errorCode, errString);
biometricDeclined();
}
@Override
public void onAuthenticationHelp(int helpCode, CharSequence helpString) {
super.onAuthenticationHelp(helpCode, helpString);
}
@Override
public void onAuthenticationSucceeded(BiometricPrompt.AuthenticationResult result) {
unlockViaFingerprintScan();
}
@Override
public void onAuthenticationFailed() {
super.onAuthenticationFailed();
}
});
}
}
@TargetApi(VERSION_CODES.M)
private boolean isFingerprintEnabled() {
// Used for tests
if (forceBiometric) {
return true;
}
/*
* TODO: Remove this check once minAPI >= 23.
*/
if (VERSION.SDK_INT >= VERSION_CODES.M) {
final FingerprintManager fingerprintManager = (FingerprintManager) this.getSystemService(Context.FINGERPRINT_SERVICE);
// Here, this activity is the current activity.
if (checkSelfPermission(Manifest.permission.USE_FINGERPRINT) != PackageManager.PERMISSION_GRANTED) {
requestPermissions(new String[]{ permission.USE_FINGERPRINT}, REQUEST_CODE_ASK_PERMISSIONS);
} else {
return fingerprintManager != null && fingerprintManager.isHardwareDetected()
&& fingerprintManager.hasEnrolledFingerprints();
}
}
return false;
}
@Override
public void onRequestPermissionsResult(int requestCode, String[] permissions, int[] grantResults) {
if (requestCode == REQUEST_CODE_ASK_PERMISSIONS && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
launchBiometricAuth();
return;
}
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
}
public void unlockViaFingerprintScan() {
if (!passcodeManager.biometricEnabled()) {
passcodeManager.setBiometricEnabled(this, true);
}
passcodeManager.unlock();
done();
}
private boolean canShowBiometric() {
return passcodeManager.biometricAllowed() && isFingerprintEnabled();
}
private void launchBiometricAuth() {
if (passcodeManager != null && canShowBiometric()) {
if (VERSION.SDK_INT >= VERSION_CODES.P) {
showBiometricDialog();
} else if (VERSION.SDK_INT >= VERSION_CODES.M) {
showFingerprintDialog();
}
}
}
private void clearUi() {
title.setVisibility(View.GONE);
instr.setVisibility(View.GONE);
passcodeField.setVisibility(View.GONE);
passcodeBox.setVisibility(View.GONE);
logoutButton.setVisibility(View.GONE);
verifyButton.setVisibility(View.GONE);
bioInstrTitle.setVisibility(View.GONE);
bioInstr.setVisibility(View.GONE);
notNowButton.setVisibility(View.GONE);
enableButton.setVisibility(View.GONE);
biometricBox.setVisibility(View.GONE);
fingerImage.setVisibility(View.GONE);
}
private void hideKeyboard() {
InputMethodManager imm = (InputMethodManager) this.getSystemService(Activity.INPUT_METHOD_SERVICE);
if (this.passcodeField != null) {
imm.hideSoftInputFromWindow(this.passcodeField.getWindowToken(), 0);
}
}
private void showKeyboard() {
AccessibilityManager am = (AccessibilityManager) this.getSystemService(Context.ACCESSIBILITY_SERVICE);
if (am.isEnabled()) {
// Check if keyboard is shown based on verify button, which is oriented to the bottom of
// the layout. Checking window instead of screen even works for split screen.
int[] location = new int[2];
verifyButton.getLocationInWindow(location);
if (location[1] == 0) {
passcodeField.requestFocus();
}
} else {
passcodeField.requestFocus();
}
}
private void sendAccessibilityEvent(String text) {
AccessibilityManager am = (AccessibilityManager) this.getSystemService(Context.ACCESSIBILITY_SERVICE);
if (am.isEnabled()) {
AccessibilityEvent event = AccessibilityEvent.obtain();
event.setEventType(AccessibilityEvent.TYPE_WINDOW_STATE_CHANGED);
event.setClassName(getClass().getName());
event.setPackageName(this.getPackageName());
event.getText().add(text);
am.sendAccessibilityEvent(event);
}
}
} | 1 | 17,528 | The issue is that `currentMode = newMode;` is called at the end of the method. The passcode screen will come up, but since the activity thinks the current mode is still BiometricCheck, we don't check the passcode when it is submitted. | forcedotcom-SalesforceMobileSDK-Android | java |
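To make the failure mode concrete, here is a stripped-down sketch of the control flow described above. The class, the enum subset, and the helper are stand-ins rather than the SDK source; only the `newMode = PasscodeMode.Check;` line mirrors the actual patch:

```java
// Simplified reconstruction of PasscodeActivity.setMode(); not the real code.
public class SetModeSketch {

    enum PasscodeMode { Check, BiometricCheck }

    private PasscodeMode currentMode;

    // Assume biometric auth is unavailable, which forces the fallback path.
    private boolean canShowBiometric() {
        return false;
    }

    public void setMode(PasscodeMode newMode) {
        if (newMode == currentMode) {
            return;
        }
        if (newMode == PasscodeMode.BiometricCheck && !canShowBiometric()) {
            setMode(PasscodeMode.Check); // inner call sets currentMode = Check
            newMode = PasscodeMode.Check; // the patched line: without it, the
                                          // assignment below flips currentMode
                                          // back to BiometricCheck, so a later
                                          // onSubmit() never takes the Check
                                          // branch and the passcode is ignored
        }
        // ... per-mode UI setup elided ...
        currentMode = newMode;
    }
}
```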
@@ -51,7 +51,7 @@ import org.tikv.kvproto.Coprocessor.KeyRange;
public class ScanAnalyzer {
private static final double INDEX_SCAN_COST_FACTOR = 1.2;
private static final double TABLE_SCAN_COST_FACTOR = 1.0;
- private static final double DOUBLE_READ_COST_FACTOR = TABLE_SCAN_COST_FACTOR * 3;
+ private static final double DOUBLE_READ_COST_FACTOR = 0.2; // TABLE_SCAN_COST_FACTOR * 3;
public static class ScanPlan {
ScanPlan( | 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv.predicates;
import static com.google.common.base.Preconditions.checkArgument;
import static com.pingcap.tikv.predicates.PredicateUtils.expressionToIndexRanges;
import static com.pingcap.tikv.util.KeyRangeUtils.makeCoprocRange;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.BoundType;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Range;
import com.pingcap.tikv.exception.TiClientInternalException;
import com.pingcap.tikv.expression.Expression;
import com.pingcap.tikv.expression.visitor.IndexMatcher;
import com.pingcap.tikv.expression.visitor.MetaResolver;
import com.pingcap.tikv.expression.visitor.PrunedPartitionBuilder;
import com.pingcap.tikv.key.IndexScanKeyRangeBuilder;
import com.pingcap.tikv.key.Key;
import com.pingcap.tikv.key.RowKey;
import com.pingcap.tikv.key.TypedKey;
import com.pingcap.tikv.meta.TiColumnInfo;
import com.pingcap.tikv.meta.TiIndexColumn;
import com.pingcap.tikv.meta.TiIndexInfo;
import com.pingcap.tikv.meta.TiPartitionDef;
import com.pingcap.tikv.meta.TiTableInfo;
import com.pingcap.tikv.statistics.IndexStatistics;
import com.pingcap.tikv.statistics.TableStatistics;
import com.pingcap.tikv.types.DataType;
import com.pingcap.tikv.util.Pair;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.tikv.kvproto.Coprocessor.KeyRange;
public class ScanAnalyzer {
private static final double INDEX_SCAN_COST_FACTOR = 1.2;
private static final double TABLE_SCAN_COST_FACTOR = 1.0;
private static final double DOUBLE_READ_COST_FACTOR = TABLE_SCAN_COST_FACTOR * 3;
public static class ScanPlan {
ScanPlan(
List<KeyRange> keyRanges,
Set<Expression> filters,
TiIndexInfo index,
double cost,
boolean isDoubleRead,
double estimatedRowCount,
List<TiPartitionDef> partDefs) {
this.filters = filters;
this.keyRanges = keyRanges;
this.cost = cost;
this.index = index;
this.isDoubleRead = isDoubleRead;
this.estimatedRowCount = estimatedRowCount;
this.prunedParts = partDefs;
}
private final List<KeyRange> keyRanges;
private final Set<Expression> filters;
private final double cost;
private TiIndexInfo index;
private final boolean isDoubleRead;
private final double estimatedRowCount;
private final List<TiPartitionDef> prunedParts;
public double getEstimatedRowCount() {
return estimatedRowCount;
}
public List<KeyRange> getKeyRanges() {
return keyRanges;
}
public Set<Expression> getFilters() {
return filters;
}
public double getCost() {
return cost;
}
public boolean isIndexScan() {
return index != null && !index.isFakePrimaryKey();
}
public TiIndexInfo getIndex() {
return index;
}
public boolean isDoubleRead() {
return isDoubleRead;
}
public List<TiPartitionDef> getPrunedParts() {
return prunedParts;
}
}
// build a scan for debug purpose.
public ScanPlan buildScan(
List<TiColumnInfo> columnList, List<Expression> conditions, TiTableInfo table) {
return buildScan(columnList, conditions, table, null);
}
// Build scan plan picking access path with lowest cost by estimation
public ScanPlan buildScan(
List<TiColumnInfo> columnList,
List<Expression> conditions,
TiTableInfo table,
TableStatistics tableStatistics) {
ScanPlan minPlan = buildTableScan(conditions, table, tableStatistics);
double minCost = minPlan.getCost();
for (TiIndexInfo index : table.getIndices()) {
ScanPlan plan = buildIndexScan(columnList, conditions, index, table, tableStatistics);
if (plan.getCost() < minCost) {
minPlan = plan;
minCost = plan.getCost();
}
}
return minPlan;
}
public ScanPlan buildTableScan(
List<Expression> conditions, TiTableInfo table, TableStatistics tableStatistics) {
TiIndexInfo pkIndex = TiIndexInfo.generateFakePrimaryKeyIndex(table);
return buildIndexScan(table.getColumns(), conditions, pkIndex, table, tableStatistics);
}
ScanPlan buildIndexScan(
List<TiColumnInfo> columnList,
List<Expression> conditions,
TiIndexInfo index,
TiTableInfo table,
TableStatistics tableStatistics) {
requireNonNull(table, "Table cannot be null to encoding keyRange");
requireNonNull(conditions, "conditions cannot be null to encoding keyRange");
MetaResolver.resolve(conditions, table);
ScanSpec result = extractConditions(conditions, table, index);
double cost = SelectivityCalculator.calcPseudoSelectivity(result);
List<IndexRange> irs =
expressionToIndexRanges(
result.getPointPredicates(), result.getRangePredicate(), table, index);
List<TiPartitionDef> prunedParts = null;
// apply partition pruning here.
if (table.getPartitionInfo() != null) {
PrunedPartitionBuilder prunedPartBuilder = new PrunedPartitionBuilder();
prunedParts = prunedPartBuilder.prune(table, conditions);
}
List<KeyRange> keyRanges;
boolean isDoubleRead = false;
double estimatedRowCount = -1;
// table name and columns
int tableColSize = table.getColumns().size() + 1;
if (index == null || index.isFakePrimaryKey()) {
if (tableStatistics != null) {
cost = 100.0; // Full table scan cost
// TODO: Fine-grained statistics usage
}
keyRanges = buildTableScanKeyRange(table, irs, prunedParts);
cost *= tableColSize * TABLE_SCAN_COST_FACTOR;
} else {
if (tableStatistics != null) {
long totalRowCount = tableStatistics.getCount();
IndexStatistics indexStatistics = tableStatistics.getIndexHistMap().get(index.getId());
if (conditions.isEmpty()) {
cost = 100.0; // Full index scan cost
// TODO: Fine-grained statistics usage
estimatedRowCount = totalRowCount;
} else if (indexStatistics != null) {
double idxRangeRowCnt = indexStatistics.getRowCount(irs);
// guess the percentage of rows hit
cost = 100.0 * idxRangeRowCnt / totalRowCount;
estimatedRowCount = idxRangeRowCnt;
}
}
isDoubleRead = !isCoveringIndex(columnList, index, table.isPkHandle());
// table name, index and handle column
int indexSize = index.getIndexColumns().size() + 2;
if (isDoubleRead) {
cost *= tableColSize * DOUBLE_READ_COST_FACTOR + indexSize * INDEX_SCAN_COST_FACTOR;
} else {
cost *= indexSize * INDEX_SCAN_COST_FACTOR;
}
keyRanges = buildIndexScanKeyRange(table, index, irs, prunedParts);
}
return new ScanPlan(
keyRanges,
result.getResidualPredicates(),
index,
cost,
isDoubleRead,
estimatedRowCount,
prunedParts);
}
private Pair<Key, Key> buildTableScanKeyRangePerId(long id, IndexRange ir) {
Key startKey;
Key endKey;
if (ir.hasAccessKey()) {
checkArgument(
!ir.hasRange(), "Table scan must have one and only one access condition / point");
Key key = ir.getAccessKey();
checkArgument(key instanceof TypedKey, "Table scan key range must be typed key");
TypedKey typedKey = (TypedKey) key;
startKey = RowKey.toRowKey(id, typedKey);
endKey = startKey.next();
} else if (ir.hasRange()) {
checkArgument(
!ir.hasAccessKey(), "Table scan must have one and only one access condition / point");
Range<TypedKey> r = ir.getRange();
if (!r.hasLowerBound()) {
// -INF
startKey = RowKey.createMin(id);
} else {
// Comparison with null should be filtered since it yields unknown always
startKey = RowKey.toRowKey(id, r.lowerEndpoint());
if (r.lowerBoundType().equals(BoundType.OPEN)) {
startKey = startKey.next();
}
}
if (!r.hasUpperBound()) {
// INF
endKey = RowKey.createBeyondMax(id);
} else {
endKey = RowKey.toRowKey(id, r.upperEndpoint());
if (r.upperBoundType().equals(BoundType.CLOSED)) {
endKey = endKey.next();
}
}
} else {
throw new TiClientInternalException("Empty access conditions");
}
return new Pair<>(startKey, endKey);
}
private List<KeyRange> buildTableScanKeyRangeWithIds(
List<Long> ids, List<IndexRange> indexRanges) {
List<KeyRange> ranges = new ArrayList<>(indexRanges.size());
for (Long id : ids) {
indexRanges.forEach(
(ir) -> {
Pair<Key, Key> pairKey = buildTableScanKeyRangePerId(id, ir);
Key startKey = pairKey.first;
Key endKey = pairKey.second;
// This range only possible when < MIN or > MAX
if (!startKey.equals(endKey)) {
ranges.add(makeCoprocRange(startKey.toByteString(), endKey.toByteString()));
}
});
}
return ranges;
}
@VisibleForTesting
List<KeyRange> buildTableScanKeyRange(
TiTableInfo table, List<IndexRange> indexRanges, List<TiPartitionDef> prunedParts) {
requireNonNull(table, "Table is null");
requireNonNull(indexRanges, "indexRanges is null");
if (table.isPartitionEnabled()) {
List<Long> ids = new ArrayList<>();
for (TiPartitionDef pDef : prunedParts) {
ids.add(pDef.getId());
}
return buildTableScanKeyRangeWithIds(ids, indexRanges);
} else {
return buildTableScanKeyRangeWithIds(ImmutableList.of(table.getId()), indexRanges);
}
}
@VisibleForTesting
List<KeyRange> buildIndexScanKeyRange(
TiTableInfo table,
TiIndexInfo index,
List<IndexRange> indexRanges,
List<TiPartitionDef> prunedParts) {
requireNonNull(table, "Table cannot be null to encoding keyRange");
requireNonNull(index, "Index cannot be null to encoding keyRange");
requireNonNull(indexRanges, "indexRanges cannot be null to encoding keyRange");
List<KeyRange> ranges = new ArrayList<>(indexRanges.size());
for (IndexRange ir : indexRanges) {
if (!table.isPartitionEnabled()) {
IndexScanKeyRangeBuilder indexScanKeyRangeBuilder =
new IndexScanKeyRangeBuilder(table.getId(), index, ir);
ranges.add(indexScanKeyRangeBuilder.compute());
} else {
for (TiPartitionDef pDef : prunedParts) {
IndexScanKeyRangeBuilder indexScanKeyRangeBuilder =
new IndexScanKeyRangeBuilder(pDef.getId(), index, ir);
ranges.add(indexScanKeyRangeBuilder.compute());
}
}
}
return ranges;
}
boolean isCoveringIndex(
List<TiColumnInfo> columns, TiIndexInfo indexColumns, boolean pkIsHandle) {
for (TiColumnInfo colInfo : columns) {
if (pkIsHandle && colInfo.isPrimaryKey()) {
continue;
}
if (colInfo.getId() == -1) {
continue;
}
boolean isIndexColumn = false;
for (TiIndexColumn indexCol : indexColumns.getIndexColumns()) {
boolean isFullLength =
indexCol.getLength() == DataType.UNSPECIFIED_LEN
|| indexCol.getLength() == colInfo.getType().getLength();
if (colInfo.getName().equalsIgnoreCase(indexCol.getName()) && isFullLength) {
isIndexColumn = true;
break;
}
}
if (!isIndexColumn) {
return false;
}
}
return true;
}
@VisibleForTesting
public static ScanSpec extractConditions(
List<Expression> conditions, TiTableInfo table, TiIndexInfo index) {
// 0. Different than TiDB implementation, here logic has been unified for TableScan and
// IndexScan by
// adding fake index on clustered table's pk
// 1. Generate access point based on equal conditions
// 2. Cut access point condition if index is not continuous
// 3. Push back prefix index conditions since prefix index retrieve more result than needed
// 4. For remaining indexes (since access conditions consume some index, and they will
// not be used in filter push down later), find continuous matching index until first unmatched
// 5. Push back index related filter if prefix index, for remaining filters
// Equal conditions needs to be process first according to index sequence
// When index is null, no access condition can be applied
ScanSpec.Builder specBuilder = new ScanSpec.Builder(table, index);
if (index != null) {
Set<Expression> visited = new HashSet<>();
IndexMatchingLoop:
for (int i = 0; i < index.getIndexColumns().size(); i++) {
// for each index column try matches an equal condition
// and push remaining back
// TODO: if more than one equal conditions match an
// index, it likely yields nothing. Maybe a check needed
// to simplify it to a false condition
TiIndexColumn col = index.getIndexColumns().get(i);
IndexMatcher eqMatcher = IndexMatcher.equalOnlyMatcher(col);
boolean found = false;
// For first prefix index encountered, it equals to a range
// and we cannot push equal conditions further
for (Expression cond : conditions) {
if (visited.contains(cond)) {
continue;
}
if (eqMatcher.match(cond)) {
specBuilder.addPointPredicate(col, cond);
if (col.isPrefixIndex()) {
specBuilder.addResidualPredicate(cond);
break IndexMatchingLoop;
}
visited.add(cond);
found = true;
break;
}
}
if (!found) {
// For first "broken index chain piece"
// search for a matching range condition
IndexMatcher matcher = IndexMatcher.matcher(col);
for (Expression cond : conditions) {
if (visited.contains(cond)) {
continue;
}
if (matcher.match(cond)) {
specBuilder.addRangePredicate(col, cond);
if (col.isPrefixIndex()) {
specBuilder.addResidualPredicate(cond);
break;
}
}
}
break;
}
}
}
specBuilder.addAllPredicates(conditions);
return specBuilder.build();
}
}
| 1 | 9,702 | Need change it back? | pingcap-tispark | java |
@@ -0,0 +1,12 @@
+# Be sure to restart your server when you modify this file.
+
+# Your secret key is used for verifying the integrity of signed cookies.
+# If you change this key, all old signed cookies will become invalid!
+
+# Make sure the secret is at least 30 characters and all random,
+# no regular words or you'll be exposed to dictionary attacks.
+# You can use `rake secret` to generate a secure secret key.
+
+# Make sure your secret_key_base is kept private
+# if you're sharing your code publicly.
+OpenStreetMap::Application.config.secret_key_base = '8be565e2e25831d88231f4bcfd83dfc3ab33957c4f33bb3cff7a279f7820ec4d0c1111f246bb347c68859c0b46b4e591be9179a53a5a7165a9936000d6d2be41' | 1 | 1 | 8,654 | Didn't we delete this file a while back because it isn't used but people think that it's a security vulnerability that it's checked in? | openstreetmap-openstreetmap-website | rb |
|
@@ -2,6 +2,7 @@ using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
+using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Logging;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq; | 1 | using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using Datadog.Trace.Logging;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
namespace Datadog.Trace.Configuration
{
/// <summary>
/// Represents a configuration source that retrieves
/// values from the provided JSON string.
/// </summary>
public class JsonConfigurationSource : IConfigurationSource
{
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(JsonConfigurationSource));
private readonly JObject _configuration;
/// <summary>
/// Initializes a new instance of the <see cref="JsonConfigurationSource"/>
/// class with the specified JSON string.
/// </summary>
/// <param name="json">A JSON string that contains configuration values.</param>
public JsonConfigurationSource(string json)
{
_configuration = (JObject)JsonConvert.DeserializeObject(json);
}
/// <summary>
/// Creates a new <see cref="JsonConfigurationSource"/> instance
/// by loading the JSON string from the specified file.
/// </summary>
/// <param name="filename">A JSON file that contains configuration values.</param>
/// <returns>The newly created configuration source.</returns>
public static JsonConfigurationSource FromFile(string filename)
{
string json = File.ReadAllText(filename);
return new JsonConfigurationSource(json);
}
/// <summary>
/// Gets the <see cref="string"/> value of
/// the setting with the specified key.
/// Supports JPath.
/// </summary>
/// <param name="key">The key that identifies the setting.</param>
/// <returns>The value of the setting, or null if not found.</returns>
string IConfigurationSource.GetString(string key)
{
return GetValue<string>(key);
}
/// <summary>
/// Gets the <see cref="int"/> value of
/// the setting with the specified key.
/// Supports JPath.
/// </summary>
/// <param name="key">The key that identifies the setting.</param>
/// <returns>The value of the setting, or null if not found.</returns>
int? IConfigurationSource.GetInt32(string key)
{
return GetValue<int?>(key);
}
/// <summary>
/// Gets the <see cref="double"/> value of
/// the setting with the specified key.
/// Supports JPath.
/// </summary>
/// <param name="key">The key that identifies the setting.</param>
/// <returns>The value of the setting, or null if not found.</returns>
double? IConfigurationSource.GetDouble(string key)
{
return GetValue<double?>(key);
}
/// <summary>
/// Gets the <see cref="bool"/> value of
/// the setting with the specified key.
/// Supports JPath.
/// </summary>
/// <param name="key">The key that identifies the setting.</param>
/// <returns>The value of the setting, or null if not found.</returns>
bool? IConfigurationSource.GetBool(string key)
{
return GetValue<bool?>(key);
}
/// <summary>
/// Gets the value of the setting with the specified key and converts it into type <typeparamref name="T"/>.
/// Supports JPath.
/// </summary>
/// <typeparam name="T">The type to convert the setting value into.</typeparam>
/// <param name="key">The key that identifies the setting.</param>
/// <returns>The value of the setting, or the default value of T if not found.</returns>
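        /// <example>
        /// A hypothetical call, assuming the JSON document contains a nested
        /// <c>logging.enabled</c> property: <c>source.GetValue&lt;bool&gt;("logging.enabled")</c>.
        /// </example>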
public T GetValue<T>(string key)
{
JToken token = _configuration.SelectToken(key, errorWhenNoMatch: false);
return token == null
? default
: token.Value<T>();
}
/// <summary>
/// Gets a <see cref="ConcurrentDictionary{TKey, TValue}"/> containing all of the values.
/// </summary>
/// <remarks>
/// Example JSON where `globalTags` is the configuration key.
/// {
/// "globalTags": {
/// "name1": "value1",
/// "name2": "value2"
/// }
/// }
/// </remarks>
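        /// <example>
        /// With the JSON shown above, <c>source.GetDictionary("globalTags")</c> would return a
        /// dictionary mapping <c>name1</c> to <c>value1</c> and <c>name2</c> to <c>value2</c>
        /// (a sketch; <c>source</c> is an assumed <see cref="JsonConfigurationSource"/> instance).
        /// </example>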
/// <param name="key">The key that identifies the setting.</param>
/// <returns><see cref="IDictionary{TKey, TValue}"/> containing all of the key-value pairs.</returns>
/// <exception cref="JsonReaderException">Thrown if the configuration value is not a valid JSON string.</exception>"
public IDictionary<string, string> GetDictionary(string key)
{
var token = _configuration.SelectToken(key, errorWhenNoMatch: false);
if (token == null)
{
return null;
}
if (token.Type == JTokenType.Object)
{
try
{
                    var dictionary = token.ToObject<ConcurrentDictionary<string, string>>();
return dictionary;
}
catch (Exception e)
{
Log.Error(e, "Unable to parse configuration value for {0} as key-value pairs of strings.", key);
return null;
}
}
return StringConfigurationSource.ParseCustomKeyValues(token.ToString());
}
}
}
| 1 | 16,620 | nit: Looks like this can be removed now | DataDog-dd-trace-dotnet | .cs |
@@ -54,15 +54,9 @@ func (p *PKI) Provision(ctx caddy.Context) error {
p.ctx = ctx
p.log = ctx.Logger(p)
- // if this app is initialized at all, ensure there's at
- // least a default CA that can be used: the standard CA
- // which is used implicitly for signing local-use certs
if p.CAs == nil {
p.CAs = make(map[string]*CA)
}
- if _, ok := p.CAs[DefaultCAID]; !ok {
- p.CAs[DefaultCAID] = new(CA)
- }
for caID, ca := range p.CAs {
err := ca.Provision(ctx, caID, p.log) | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki
import (
"fmt"
"github.com/caddyserver/caddy/v2"
"go.uber.org/zap"
)
func init() {
caddy.RegisterModule(PKI{})
}
// PKI provides Public Key Infrastructure facilities for Caddy.
//
// This app can define certificate authorities (CAs) which are capable
// of signing certificates. Other modules can be configured to use
// the CAs defined by this app for issuing certificates or getting
// key information needed for establishing trust.
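//
// A minimal config sketch (an assumed shape, derived from the JSON struct
// tags below; "local" matches the documented default CA ID):
//
//	{"apps": {"pki": {"certificate_authorities": {"local": {}}}}}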
type PKI struct {
// The certificate authorities to manage. Each CA is keyed by an
// ID that is used to uniquely identify it from other CAs.
// The default CA ID is "local".
CAs map[string]*CA `json:"certificate_authorities,omitempty"`
ctx caddy.Context
log *zap.Logger
}
// CaddyModule returns the Caddy module information.
func (PKI) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "pki",
New: func() caddy.Module { return new(PKI) },
}
}
// Provision sets up the configuration for the PKI app.
func (p *PKI) Provision(ctx caddy.Context) error {
p.ctx = ctx
p.log = ctx.Logger(p)
// if this app is initialized at all, ensure there's at
// least a default CA that can be used: the standard CA
// which is used implicitly for signing local-use certs
if p.CAs == nil {
p.CAs = make(map[string]*CA)
}
if _, ok := p.CAs[DefaultCAID]; !ok {
p.CAs[DefaultCAID] = new(CA)
}
for caID, ca := range p.CAs {
err := ca.Provision(ctx, caID, p.log)
if err != nil {
return fmt.Errorf("provisioning CA '%s': %v", caID, err)
}
}
return nil
}
// Start starts the PKI app.
func (p *PKI) Start() error {
// install roots to trust store, if not disabled
for _, ca := range p.CAs {
if ca.InstallTrust != nil && !*ca.InstallTrust {
ca.log.Warn("root certificate trust store installation disabled; unconfigured clients may show warnings",
zap.String("path", ca.rootCertPath))
continue
}
if err := ca.installRoot(); err != nil {
// could be some system dependencies that are missing;
// shouldn't totally prevent startup, but we should log it
ca.log.Error("failed to install root certificate",
zap.Error(err),
zap.String("certificate_file", ca.rootCertPath))
}
}
// see if root/intermediates need renewal...
p.renewCerts()
// ...and keep them renewed
go p.maintenance()
return nil
}
// Stop stops the PKI app.
func (p *PKI) Stop() error {
return nil
}
// Interface guards
var (
_ caddy.Provisioner = (*PKI)(nil)
_ caddy.App = (*PKI)(nil)
)
| 1 | 17,136 | Can probably remove the lines above it too (L60-62), since ProvisionDefaultCA() makes sure the map isn't nil. | caddyserver-caddy | go |
@@ -260,11 +260,15 @@ static void on_context_dispose(h2o_handler_t *_self, h2o_context_t *ctx)
void h2o_status_register(h2o_pathconf_t *conf)
{
+ static int handler_registered = 0;
struct st_h2o_root_status_handler_t *self = (void *)h2o_create_handler(conf, sizeof(*self));
self->super.on_context_init = on_context_init;
self->super.on_context_dispose = on_context_dispose;
self->super.on_req = on_req;
- h2o_config_register_status_handler(conf->global, requests_status_handler);
- h2o_config_register_status_handler(conf->global, events_status_handler);
- h2o_config_register_status_handler(conf->global, durations_status_handler);
+ if (!handler_registered) {
+ handler_registered++;
+ h2o_config_register_status_handler(conf->global, requests_status_handler);
+ h2o_config_register_status_handler(conf->global, events_status_handler);
+ h2o_config_register_status_handler(conf->global, durations_status_handler);
+ }
} | 1 | /*
* Copyright (c) 2016 DeNA Co., Ltd., Kazuho Oku
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "h2o.h"
extern h2o_status_handler_t events_status_handler;
extern h2o_status_handler_t requests_status_handler;
extern h2o_status_handler_t durations_status_handler;
struct st_h2o_status_logger_t {
h2o_logger_t super;
};
struct st_h2o_root_status_handler_t {
h2o_handler_t super;
H2O_VECTOR(h2o_multithread_receiver_t *) receivers;
};
struct st_h2o_status_context_t {
h2o_context_t *ctx;
h2o_multithread_receiver_t receiver;
};
struct st_status_ctx_t {
int active;
void *ctx;
};
struct st_h2o_status_collector_t {
struct {
h2o_req_t *req;
h2o_multithread_receiver_t *receiver;
} src;
size_t num_remaining_threads_atomic;
H2O_VECTOR(struct st_status_ctx_t) status_ctx;
};
struct st_h2o_status_message_t {
h2o_multithread_message_t super;
struct st_h2o_status_collector_t *collector;
};
static void collect_reqs_of_context(struct st_h2o_status_collector_t *collector, h2o_context_t *ctx)
{
int i;
for (i = 0; i < ctx->globalconf->statuses.size; i++) {
struct st_status_ctx_t *sc = collector->status_ctx.entries + i;
h2o_status_handler_t *sh = ctx->globalconf->statuses.entries + i;
if (sc->active && sh->per_thread != NULL)
sh->per_thread(sc->ctx, ctx);
}
if (__sync_sub_and_fetch(&collector->num_remaining_threads_atomic, 1) == 0) {
struct st_h2o_status_message_t *message = h2o_mem_alloc(sizeof(*message));
message->super = (h2o_multithread_message_t){{NULL}};
message->collector = collector;
h2o_multithread_send_message(collector->src.receiver, &message->super);
}
}
static void send_response(struct st_h2o_status_collector_t *collector)
{
static h2o_generator_t generator = {NULL, NULL};
h2o_req_t *req;
size_t nr_statuses;
int i;
int cur_resp = 0;
req = collector->src.req;
if (!req) {
h2o_mem_release_shared(collector);
return;
}
nr_statuses = req->conn->ctx->globalconf->statuses.size;
size_t nr_resp = nr_statuses + 2; // 2 for the footer and header
h2o_iovec_t resp[nr_resp];
memset(resp, 0, sizeof(resp[0]) * nr_resp);
resp[cur_resp++] = (h2o_iovec_t){H2O_STRLIT("{\n")};
    int comma_removed = 0;
for (i = 0; i < req->conn->ctx->globalconf->statuses.size; i++) {
h2o_status_handler_t *sh = &req->conn->ctx->globalconf->statuses.entries[i];
if (!collector->status_ctx.entries[i].active) {
continue;
}
resp[cur_resp++] = sh->final(collector->status_ctx.entries[i].ctx, req->conn->ctx->globalconf, req);
        if (resp[cur_resp - 1].len > 0 && !comma_removed) {
            /* each status fragment comes in with a leading comma; replace it with a space */
resp[cur_resp - 1].base[0] = ' ';
            comma_removed = 1;
}
}
resp[cur_resp++] = (h2o_iovec_t){H2O_STRLIT("\n}\n")};
req->res.status = 200;
h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, H2O_STRLIT("text/plain; charset=utf-8"));
h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CACHE_CONTROL, NULL, H2O_STRLIT("no-cache, no-store"));
h2o_start_response(req, &generator);
h2o_send(req, resp, h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("HEAD")) ? 0 : nr_resp,
H2O_SEND_STATE_FINAL);
h2o_mem_release_shared(collector);
}
static void on_collect_notify(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
{
struct st_h2o_status_context_t *status_ctx = H2O_STRUCT_FROM_MEMBER(struct st_h2o_status_context_t, receiver, receiver);
while (!h2o_linklist_is_empty(messages)) {
struct st_h2o_status_message_t *message = H2O_STRUCT_FROM_MEMBER(struct st_h2o_status_message_t, super, messages->next);
struct st_h2o_status_collector_t *collector = message->collector;
h2o_linklist_unlink(&message->super.link);
free(message);
if (__sync_add_and_fetch(&collector->num_remaining_threads_atomic, 0) != 0) {
collect_reqs_of_context(collector, status_ctx->ctx);
} else {
send_response(collector);
}
}
}
static void on_collector_dispose(void *_collector)
{
}
static void on_req_close(void *p)
{
struct st_h2o_status_collector_t *collector = *(void **)p;
collector->src.req = NULL;
h2o_mem_release_shared(collector);
}
static int on_req_json(struct st_h2o_root_status_handler_t *self, h2o_req_t *req, h2o_iovec_t status_list)
{
{ /* construct collector and send request to every thread */
struct st_h2o_status_context_t *status_ctx = h2o_context_get_handler_context(req->conn->ctx, &self->super);
struct st_h2o_status_collector_t *collector = h2o_mem_alloc_shared(NULL, sizeof(*collector), on_collector_dispose);
size_t i;
memset(collector, 0, sizeof(*collector));
for (i = 0; i < req->conn->ctx->globalconf->statuses.size; i++) {
h2o_status_handler_t *sh;
h2o_vector_reserve(&req->pool, &collector->status_ctx, collector->status_ctx.size + 1);
sh = &req->conn->ctx->globalconf->statuses.entries[i];
if (status_list.base) {
if (!h2o_contains_token(status_list.base, status_list.len, sh->name.base, sh->name.len, ',')) {
collector->status_ctx.entries[collector->status_ctx.size].active = 0;
goto Skip;
}
}
if (sh->init) {
collector->status_ctx.entries[collector->status_ctx.size].ctx = sh->init();
}
collector->status_ctx.entries[collector->status_ctx.size].active = 1;
Skip:
collector->status_ctx.size++;
}
collector->src.req = req;
collector->src.receiver = &status_ctx->receiver;
collector->num_remaining_threads_atomic = self->receivers.size;
for (i = 0; i != self->receivers.size; ++i) {
struct st_h2o_status_message_t *message = h2o_mem_alloc(sizeof(*message));
*message = (struct st_h2o_status_message_t){{{NULL}}, collector};
h2o_multithread_send_message(self->receivers.entries[i], &message->super);
}
/* collector is also retained by the on_req_close callback */
*(struct st_h2o_status_collector_t **)h2o_mem_alloc_shared(&req->pool, sizeof(collector), on_req_close) = collector;
h2o_mem_addref_shared(collector);
}
return 0;
}
static int on_req(h2o_handler_t *_self, h2o_req_t *req)
{
struct st_h2o_root_status_handler_t *self = (void *)_self;
size_t prefix_len = req->pathconf->path.len - (req->pathconf->path.base[req->pathconf->path.len - 1] == '/');
h2o_iovec_t local_path = h2o_iovec_init(req->path_normalized.base + prefix_len, req->path_normalized.len - prefix_len);
if (local_path.len == 0 || h2o_memis(local_path.base, local_path.len, H2O_STRLIT("/"))) {
/* root of the handler returns HTML that renders the status */
h2o_iovec_t fn;
const char *root = getenv("H2O_ROOT");
if (root == NULL)
root = H2O_TO_STR(H2O_ROOT);
fn = h2o_concat(&req->pool, h2o_iovec_init(root, strlen(root)), h2o_iovec_init(H2O_STRLIT("/share/h2o/status/index.html")));
h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CACHE_CONTROL, NULL, H2O_STRLIT("no-cache"));
return h2o_file_send(req, 200, "OK", fn.base, h2o_iovec_init(H2O_STRLIT("text/html; charset=utf-8")), 0);
} else if (h2o_memis(local_path.base, local_path.len, H2O_STRLIT("/json"))) {
int ret;
/* "/json" maps to the JSON API */
h2o_iovec_t status_list = {NULL, 0}; /* NULL means we'll show all statuses */
if (req->query_at != SIZE_MAX && (req->path.len - req->query_at > 6)) {
if (h2o_memis(&req->path.base[req->query_at], 6, "?show=", 6)) {
status_list = h2o_iovec_init(&req->path.base[req->query_at + 6], req->path.len - req->query_at - 6);
}
}
ret = on_req_json(self, req, status_list);
return ret;
}
return -1;
}
static void on_context_init(h2o_handler_t *_self, h2o_context_t *ctx)
{
struct st_h2o_root_status_handler_t *self = (void *)_self;
struct st_h2o_status_context_t *status_ctx = h2o_mem_alloc(sizeof(*status_ctx));
status_ctx->ctx = ctx;
h2o_multithread_register_receiver(ctx->queue, &status_ctx->receiver, on_collect_notify);
h2o_vector_reserve(NULL, &self->receivers, self->receivers.size + 1);
self->receivers.entries[self->receivers.size++] = &status_ctx->receiver;
h2o_context_set_handler_context(ctx, &self->super, status_ctx);
}
static void on_context_dispose(h2o_handler_t *_self, h2o_context_t *ctx)
{
struct st_h2o_root_status_handler_t *self = (void *)_self;
struct st_h2o_status_context_t *status_ctx = h2o_context_get_handler_context(ctx, &self->super);
size_t i;
for (i = 0; i != self->receivers.size; ++i)
if (self->receivers.entries[i] == &status_ctx->receiver)
break;
assert(i != self->receivers.size);
    memmove(self->receivers.entries + i, self->receivers.entries + i + 1,
            (self->receivers.size - i - 1) * sizeof(self->receivers.entries[0]));
--self->receivers.size;
h2o_multithread_unregister_receiver(ctx->queue, &status_ctx->receiver);
free(status_ctx);
}
void h2o_status_register(h2o_pathconf_t *conf)
{
struct st_h2o_root_status_handler_t *self = (void *)h2o_create_handler(conf, sizeof(*self));
self->super.on_context_init = on_context_init;
self->super.on_context_dispose = on_context_dispose;
self->super.on_req = on_req;
h2o_config_register_status_handler(conf->global, requests_status_handler);
h2o_config_register_status_handler(conf->global, events_status_handler);
h2o_config_register_status_handler(conf->global, durations_status_handler);
}
| 1 | 13,203 | Do you think it's worth erroring out? It's likely that this is a configuration error that the user might want to know about? | h2o-h2o | c |
@@ -20,7 +20,7 @@ def function():
#todo: no space after hash
# +1: [fixme]
- # FIXME: this is broken
+ # FIXME: this is broken
# +1: [fixme]
# ./TODO: find with notes
# +1: [fixme] | 1 | # -*- encoding=utf-8 -*-
# pylint: disable=missing-docstring, unused-variable
# +1: [fixme]
# FIXME: beep
def function():
variable = "FIXME: Ignore me!"
# +1: [fixme]
test = "text" # FIXME: Valid test
# +1: [fixme]
# TODO: Do something with the variables
# +1: [fixme]
xxx = "n/a" # XXX: Fix this later
# +1: [fixme]
#FIXME: no space after hash
# +1: [fixme]
#todo: no space after hash
# +1: [fixme]
# FIXME: this is broken
# +1: [fixme]
# ./TODO: find with notes
# +1: [fixme]
# TO make something DO: find with regex
# FIXME: this is broken (ISSUE-1234)
#FIXME: in fact nothing to fix #pylint: disable=fixme
#TODO: in fact nothing to do #pylint: disable=fixme
#TODO: in fact nothing to do #pylint: disable=line-too-long, fixme
# Todoist API mentioned should not result in a message.
| 1 | 13,238 | I think the functional test should not change here, this is probably a test in itself :) | PyCQA-pylint | py |
@@ -31,7 +31,16 @@ import (
"github.com/gogits/gogs/modules/setting"
)
-var Sanitizer = bluemonday.UGCPolicy().AllowAttrs("class").Matching(regexp.MustCompile(`[\p{L}\p{N}\s\-_',:\[\]!\./\\\(\)&]*`)).OnElements("code")
+func BuildSanitizer() (p *bluemonday.Policy) {
+ p = bluemonday.UGCPolicy()
+ p.AllowAttrs("class").Matching(regexp.MustCompile(`[\p{L}\p{N}\s\-_',:\[\]!\./\\\(\)&]*`)).OnElements("code")
+
+ p.AllowAttrs("type").Matching(regexp.MustCompile(`^checkbox$`)).OnElements("input")
+ p.AllowAttrs("checked", "disabled").OnElements("input")
+ return p
+}
+
+var Sanitizer = BuildSanitizer()
// EncodeMD5 encodes string to md5 hex value.
func EncodeMD5(str string) string { | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package base
import (
"crypto/hmac"
"crypto/md5"
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"html/template"
"math"
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/Unknwon/com"
"github.com/Unknwon/i18n"
"github.com/microcosm-cc/bluemonday"
"github.com/gogits/chardet"
"github.com/gogits/gogs/modules/avatar"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/setting"
)
var Sanitizer = bluemonday.UGCPolicy().AllowAttrs("class").Matching(regexp.MustCompile(`[\p{L}\p{N}\s\-_',:\[\]!\./\\\(\)&]*`)).OnElements("code")
// EncodeMD5 encodes string to md5 hex value.
func EncodeMD5(str string) string {
m := md5.New()
m.Write([]byte(str))
return hex.EncodeToString(m.Sum(nil))
}
// Encode string to sha1 hex value.
func EncodeSha1(str string) string {
h := sha1.New()
h.Write([]byte(str))
return hex.EncodeToString(h.Sum(nil))
}
func ShortSha(sha1 string) string {
if len(sha1) == 40 {
return sha1[:10]
}
return sha1
}
func DetectEncoding(content []byte) (string, error) {
if utf8.Valid(content) {
log.Debug("Detected encoding: utf-8 (fast)")
return "UTF-8", nil
}
result, err := chardet.NewTextDetector().DetectBest(content)
if result.Charset != "UTF-8" && len(setting.Repository.AnsiCharset) > 0 {
log.Debug("Using default AnsiCharset: %s", setting.Repository.AnsiCharset)
return setting.Repository.AnsiCharset, err
}
log.Debug("Detected encoding: %s", result.Charset)
return result.Charset, err
}
func BasicAuthDecode(encoded string) (string, string, error) {
s, err := base64.StdEncoding.DecodeString(encoded)
if err != nil {
return "", "", err
}
auth := strings.SplitN(string(s), ":", 2)
return auth[0], auth[1], nil
}
func BasicAuthEncode(username, password string) string {
return base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
}
// GetRandomString generate random string by specify chars.
func GetRandomString(n int, alphabets ...byte) string {
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var bytes = make([]byte, n)
rand.Read(bytes)
for i, b := range bytes {
if len(alphabets) == 0 {
bytes[i] = alphanum[b%byte(len(alphanum))]
} else {
bytes[i] = alphabets[b%byte(len(alphabets))]
}
}
return string(bytes)
}
// http://code.google.com/p/go/source/browse/pbkdf2/pbkdf2.go?repo=crypto
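// A usage sketch (not called anywhere in this file; salt is an assumed value
// and crypto/sha256 would need to be imported):
//
//	key := PBKDF2([]byte("password"), salt, 10000, 32, sha256.New) // 32-byte key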
func PBKDF2(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
prf := hmac.New(h, password)
hashLen := prf.Size()
numBlocks := (keyLen + hashLen - 1) / hashLen
var buf [4]byte
dk := make([]byte, 0, numBlocks*hashLen)
U := make([]byte, hashLen)
for block := 1; block <= numBlocks; block++ {
// N.B.: || means concatenation, ^ means XOR
// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
// U_1 = PRF(password, salt || uint(i))
prf.Reset()
prf.Write(salt)
buf[0] = byte(block >> 24)
buf[1] = byte(block >> 16)
buf[2] = byte(block >> 8)
buf[3] = byte(block)
prf.Write(buf[:4])
dk = prf.Sum(dk)
T := dk[len(dk)-hashLen:]
copy(U, T)
// U_n = PRF(password, U_(n-1))
for n := 2; n <= iter; n++ {
prf.Reset()
prf.Write(U)
U = U[:0]
U = prf.Sum(U)
for x := range U {
T[x] ^= U[x]
}
}
}
return dk[:keyLen]
}
// VerifyTimeLimitCode verifies a time limit code.
func VerifyTimeLimitCode(data string, minutes int, code string) bool {
if len(code) <= 18 {
return false
}
// split code
start := code[:12]
lives := code[12:18]
if d, err := com.StrTo(lives).Int(); err == nil {
minutes = d
}
// right active code
retCode := CreateTimeLimitCode(data, minutes, start)
if retCode == code && minutes > 0 {
// check time is expired or not
before, _ := time.ParseInLocation("200601021504", start, time.Local)
now := time.Now()
if before.Add(time.Minute*time.Duration(minutes)).Unix() > now.Unix() {
return true
}
}
return false
}
const TimeLimitCodeLength = 12 + 6 + 40
// CreateTimeLimitCode creates a time limit code.
// code format: 12-character datetime string + 6-character minutes string + 40-character sha1-encoded string
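// e.g. (illustrative values only): "202001021504" + "000030" + 40 hex characters,
// i.e. a code created at 2020-01-02 15:04 that stays valid for 30 minutes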
func CreateTimeLimitCode(data string, minutes int, startInf interface{}) string {
format := "200601021504"
var start, end time.Time
var startStr, endStr string
if startInf == nil {
// Use now time create code
start = time.Now()
startStr = start.Format(format)
} else {
// use start string create code
startStr = startInf.(string)
start, _ = time.ParseInLocation(format, startStr, time.Local)
startStr = start.Format(format)
}
end = start.Add(time.Minute * time.Duration(minutes))
endStr = end.Format(format)
// create sha1 encode string
sh := sha1.New()
sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes)))
encoded := hex.EncodeToString(sh.Sum(nil))
code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded)
return code
}
// AvatarLink returns avatar link by given e-mail.
func AvatarLink(email string) string {
if setting.DisableGravatar || setting.OfflineMode {
return setting.AppSubUrl + "/img/avatar_default.jpg"
}
gravatarHash := avatar.HashEmail(email)
if setting.Service.EnableCacheAvatar {
return setting.AppSubUrl + "/avatar/" + gravatarHash
}
return setting.GravatarSource + gravatarHash
}
// Seconds-based time units
const (
Minute = 60
Hour = 60 * Minute
Day = 24 * Hour
Week = 7 * Day
Month = 30 * Day
Year = 12 * Month
)
func computeTimeDiff(diff int64) (int64, string) {
diffStr := ""
switch {
case diff <= 0:
diff = 0
diffStr = "now"
case diff < 2:
diff = 0
diffStr = "1 second"
case diff < 1*Minute:
diffStr = fmt.Sprintf("%d seconds", diff)
diff = 0
case diff < 2*Minute:
diff -= 1 * Minute
diffStr = "1 minute"
case diff < 1*Hour:
diffStr = fmt.Sprintf("%d minutes", diff/Minute)
diff -= diff / Minute * Minute
case diff < 2*Hour:
diff -= 1 * Hour
diffStr = "1 hour"
case diff < 1*Day:
diffStr = fmt.Sprintf("%d hours", diff/Hour)
diff -= diff / Hour * Hour
case diff < 2*Day:
diff -= 1 * Day
diffStr = "1 day"
case diff < 1*Week:
diffStr = fmt.Sprintf("%d days", diff/Day)
diff -= diff / Day * Day
case diff < 2*Week:
diff -= 1 * Week
diffStr = "1 week"
case diff < 1*Month:
diffStr = fmt.Sprintf("%d weeks", diff/Week)
diff -= diff / Week * Week
case diff < 2*Month:
diff -= 1 * Month
diffStr = "1 month"
case diff < 1*Year:
diffStr = fmt.Sprintf("%d months", diff/Month)
diff -= diff / Month * Month
case diff < 2*Year:
diff -= 1 * Year
diffStr = "1 year"
default:
diffStr = fmt.Sprintf("%d years", diff/Year)
diff = 0
}
return diff, diffStr
}
// TimeSincePro calculates the time interval and generate full user-friendly string.
func TimeSincePro(then time.Time) string {
now := time.Now()
diff := now.Unix() - then.Unix()
if then.After(now) {
return "future"
}
var timeStr, diffStr string
for {
if diff == 0 {
break
}
diff, diffStr = computeTimeDiff(diff)
timeStr += ", " + diffStr
}
return strings.TrimPrefix(timeStr, ", ")
}
func timeSince(then time.Time, lang string) string {
now := time.Now()
lbl := i18n.Tr(lang, "tool.ago")
diff := now.Unix() - then.Unix()
if then.After(now) {
lbl = i18n.Tr(lang, "tool.from_now")
diff = then.Unix() - now.Unix()
}
switch {
case diff <= 0:
return i18n.Tr(lang, "tool.now")
case diff <= 2:
return i18n.Tr(lang, "tool.1s", lbl)
case diff < 1*Minute:
return i18n.Tr(lang, "tool.seconds", diff, lbl)
case diff < 2*Minute:
return i18n.Tr(lang, "tool.1m", lbl)
case diff < 1*Hour:
return i18n.Tr(lang, "tool.minutes", diff/Minute, lbl)
case diff < 2*Hour:
return i18n.Tr(lang, "tool.1h", lbl)
case diff < 1*Day:
return i18n.Tr(lang, "tool.hours", diff/Hour, lbl)
case diff < 2*Day:
return i18n.Tr(lang, "tool.1d", lbl)
case diff < 1*Week:
return i18n.Tr(lang, "tool.days", diff/Day, lbl)
case diff < 2*Week:
return i18n.Tr(lang, "tool.1w", lbl)
case diff < 1*Month:
return i18n.Tr(lang, "tool.weeks", diff/Week, lbl)
case diff < 2*Month:
return i18n.Tr(lang, "tool.1mon", lbl)
case diff < 1*Year:
return i18n.Tr(lang, "tool.months", diff/Month, lbl)
case diff < 2*Year:
return i18n.Tr(lang, "tool.1y", lbl)
default:
return i18n.Tr(lang, "tool.years", diff/Year, lbl)
}
}
func RawTimeSince(t time.Time, lang string) string {
return timeSince(t, lang)
}
// TimeSince calculates the time interval and generate user-friendly string.
func TimeSince(t time.Time, lang string) template.HTML {
return template.HTML(fmt.Sprintf(`<span class="time-since" title="%s">%s</span>`, t.Format(setting.TimeFormat), timeSince(t, lang)))
}
const (
Byte = 1
KByte = Byte * 1024
MByte = KByte * 1024
GByte = MByte * 1024
TByte = GByte * 1024
PByte = TByte * 1024
EByte = PByte * 1024
)
var bytesSizeTable = map[string]uint64{
"b": Byte,
"kb": KByte,
"mb": MByte,
"gb": GByte,
"tb": TByte,
"pb": PByte,
"eb": EByte,
}
func logn(n, b float64) float64 {
return math.Log(n) / math.Log(b)
}
func humanateBytes(s uint64, base float64, sizes []string) string {
if s < 10 {
return fmt.Sprintf("%dB", s)
}
e := math.Floor(logn(float64(s), base))
suffix := sizes[int(e)]
val := float64(s) / math.Pow(base, math.Floor(e))
f := "%.0f"
if val < 10 {
f = "%.1f"
}
return fmt.Sprintf(f+"%s", val, suffix)
}
// FileSize calculates the file size and generates a user-friendly string.
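// e.g. FileSize(1536) yields "1.5KB" (1536/1024 with one decimal, per humanateBytes above).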
func FileSize(s int64) string {
sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"}
return humanateBytes(uint64(s), 1024, sizes)
}
// Subtract deals with subtraction of all numeric types.
func Subtract(left interface{}, right interface{}) interface{} {
var rleft, rright int64
var fleft, fright float64
var isInt bool = true
switch left.(type) {
case int:
rleft = int64(left.(int))
case int8:
rleft = int64(left.(int8))
case int16:
rleft = int64(left.(int16))
case int32:
rleft = int64(left.(int32))
case int64:
rleft = left.(int64)
case float32:
fleft = float64(left.(float32))
isInt = false
case float64:
fleft = left.(float64)
isInt = false
}
switch right.(type) {
case int:
rright = int64(right.(int))
case int8:
rright = int64(right.(int8))
case int16:
rright = int64(right.(int16))
case int32:
rright = int64(right.(int32))
case int64:
rright = right.(int64)
case float32:
		fright = float64(right.(float32))
isInt = false
case float64:
		fright = right.(float64)
isInt = false
}
if isInt {
return rleft - rright
} else {
return fleft + float64(rleft) - (fright + float64(rright))
}
}
// EllipsisString returns a truncated short string,
// appending '...' at the end if the length of the string is too large.
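// e.g. EllipsisString("hello world", 8) yields "hello..." (5 characters plus the ellipsis).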
func EllipsisString(str string, length int) string {
if len(str) < length {
return str
}
return str[:length-3] + "..."
}
// StringsToInt64s converts a slice of string to a slice of int64.
func StringsToInt64s(strs []string) []int64 {
ints := make([]int64, len(strs))
for i := range strs {
ints[i] = com.StrTo(strs[i]).MustInt64()
}
return ints
}
// Int64sToStrings converts a slice of int64 to a slice of string.
func Int64sToStrings(ints []int64) []string {
strs := make([]string, len(ints))
for i := range ints {
strs[i] = com.ToStr(ints[i])
}
return strs
}
// Int64sToMap converts a slice of int64 to a int64 map.
func Int64sToMap(ints []int64) map[int64]bool {
m := make(map[int64]bool)
for _, i := range ints {
m[i] = true
}
return m
}
| 1 | 9,995 | Why this to be a public function? | gogs-gogs | go |
@@ -21,7 +21,7 @@ module Travis
end
def set(var, value, options = {})
- cmd "export #{var}=#{value}", options.merge(log: false)
+ cmd "export #{var}=#{value}", options.merge(log: false, timing: false)
end
def echo(string, options = {}) | 1 | module Travis
module Build
module Shell
module Dsl
def script(*args, &block)
nodes << Script.new(*merge_options(args), &block)
nodes.last
end
def cmd(code, *args)
options = args.last.is_a?(Hash) ? args.last : {}
node = Cmd.new(code, *merge_options(args))
options[:fold] ? fold(options[:fold]) { raw(node) } : raw(node)
end
def raw(code, *args)
args = merge_options(args)
pos = args.last.delete(:pos) || -1
node = code.is_a?(Node) ? code : Node.new(code, *args)
nodes.insert(pos, node)
end
def set(var, value, options = {})
cmd "export #{var}=#{value}", options.merge(log: false)
end
def echo(string, options = {})
cmd "echo #{escape(string)}", echo: false, log: true
end
def cd(path)
cmd "cd #{path}", echo: true, log: false
end
def if(*args, &block)
args = merge_options(args)
els_ = args.last.delete(:else)
nodes << If.new(*args, &block)
self.else(els_, args.last) if els_
nodes.last
end
def elif(*args, &block)
raise InvalidParent.new(Elif, If, nodes.last.class) unless nodes.last.is_a?(If)
args = merge_options(args)
els_ = args.last.delete(:else)
nodes.last.raw Elif.new(*args, &block)
self.else(els_, args.last) if els_
nodes.last
end
def else(*args, &block)
raise InvalidParent.new(Else, If, nodes.last.class) unless nodes.last.is_a?(If)
nodes.last.raw Else.new(*merge_options(args), &block)
nodes.last
end
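        # A usage sketch (hypothetical fold name and command): the block
        # receives the DSL itself, so
        #
        #   fold('install') { |sh| sh.cmd 'bundle install' }
        #
        # wraps the command in travis_fold:start/end markers that the log
        # viewer can collapse.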
def fold(name, &block)
raw "echo -en 'travis_fold:start:#{name}\\r'"
result = yield(self)
raw "echo -en 'travis_fold:end:#{name}\\r'"
result
end
private
def merge_options(args, options = {})
options = (args.last.is_a?(Hash) ? args.pop : {}).merge(options)
args << self.options.merge(options)
end
end
end
end
end
| 1 | 11,213 | I think we can remove the timing for export env vars | travis-ci-travis-build | rb |
@@ -29,8 +29,10 @@ limitations under the License.
// limitations under the License.
Modifies:
-- Remove interface "Provider"
-- Remove import "k8s.io/kubernetes/pkg/proxy/config"
+- Replace import "k8s.io/kubernetes/pkg/proxy/config" with "github.com/vmware-tanzu/antrea/third_party/proxy/config"
+- Remove config.EndpointSliceHandler, config.NodeHandler from Provider interface type
+- Remove NodeHandler, EndpointSliceHandler, Sync() from Provider interface
+- Add Run(), GetServiceByIP() to Provider interface
*/
package proxy | 1 | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
// Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
Modifies:
- Remove interface "Provider"
- Remove import "k8s.io/kubernetes/pkg/proxy/config"
*/
package proxy
import (
"fmt"
"net"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
// ServicePortName carries a namespace + name + portname. This is the unique
// identifier for a load-balanced service.
type ServicePortName struct {
types.NamespacedName
Port string
Protocol v1.Protocol
}
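// String renders as "namespace/name:port", e.g. "default/nginx:http" (a
// sketch: the Service name and port name here are assumptions; the
// "namespace/name" form comes from types.NamespacedName).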
func (spn ServicePortName) String() string {
return fmt.Sprintf("%s:%s", spn.NamespacedName.String(), spn.Port)
}
// ServicePort is an interface which abstracts information about a service.
type ServicePort interface {
// String returns service string. An example format can be: `IP:Port/Protocol`.
String() string
// GetClusterIP returns service cluster IP in net.IP format.
ClusterIP() net.IP
// GetPort returns service port if present. If return 0 means not present.
Port() int
// GetSessionAffinityType returns service session affinity type
SessionAffinityType() v1.ServiceAffinity
// GetStickyMaxAgeSeconds returns service max connection age
StickyMaxAgeSeconds() int
// ExternalIPStrings returns service ExternalIPs as a string array.
ExternalIPStrings() []string
// LoadBalancerIPStrings returns service LoadBalancerIPs as a string array.
LoadBalancerIPStrings() []string
// GetProtocol returns service protocol.
Protocol() v1.Protocol
	// LoadBalancerSourceRanges returns service LoadBalancerSourceRanges if present, or an empty array if not
LoadBalancerSourceRanges() []string
// GetHealthCheckNodePort returns service health check node port if present. If return 0, it means not present.
HealthCheckNodePort() int
// GetNodePort returns a service Node port if present. If return 0, it means not present.
NodePort() int
// GetOnlyNodeLocalEndpoints returns if a service has only node local endpoints
OnlyNodeLocalEndpoints() bool
// TopologyKeys returns service TopologyKeys as a string array.
TopologyKeys() []string
}
// Endpoint is an interface which abstracts information about an endpoint.
// TODO: Rename functions to be consistent with ServicePort.
type Endpoint interface {
// String returns endpoint string. An example format can be: `IP:Port`.
// We take the returned value as ServiceEndpoint.Endpoint.
String() string
// GetIsLocal returns true if the endpoint is running in same host as kube-proxy, otherwise returns false.
GetIsLocal() bool
// GetTopology returns the topology information of the endpoint.
GetTopology() map[string]string
// IP returns IP part of the endpoint.
IP() string
// Port returns the Port part of the endpoint.
Port() (int, error)
// Equal checks if two endpoints are equal.
Equal(Endpoint) bool
}
// ServiceEndpoint is used to identify a service and one of its endpoint pair.
type ServiceEndpoint struct {
Endpoint string
ServicePortName ServicePortName
}
| 1 | 23,019 | wrong import grouping | antrea-io-antrea | go |
@@ -232,8 +232,7 @@ public class PreferenceController implements SharedPreferences.OnSharedPreferenc
if (newValue.equals("page")) {
final Context context = ui.getActivity();
final String[] navTitles = context.getResources().getStringArray(R.array.back_button_go_to_pages);
- final String[] navTags = new String[3];
- System.arraycopy(MainActivity.NAV_DRAWER_TAGS, 0, navTags, 0, 3);
+ final String[] navTags = context.getResources().getStringArray(R.array.back_button_go_to_pages_tags);
final String choice[] = { UserPreferences.getBackButtonGoToPage() };
AlertDialog.Builder builder = new AlertDialog.Builder(context); | 1 | package de.danoeh.antennapod.preferences;
import android.Manifest;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.app.ProgressDialog;
import android.app.TimePickerDialog;
import android.content.ActivityNotFoundException;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.content.res.Resources;
import android.net.Uri;
import android.net.wifi.WifiConfiguration;
import android.net.wifi.WifiManager;
import android.os.Build;
import android.os.Bundle;
import android.support.design.widget.Snackbar;
import android.support.v4.app.ActivityCompat;
import android.support.v4.content.FileProvider;
import android.support.v7.app.AlertDialog;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.preference.CheckBoxPreference;
import android.support.v7.preference.ListPreference;
import android.support.v7.preference.Preference;
import android.support.v7.preference.PreferenceFragmentCompat;
import android.support.v7.preference.PreferenceManager;
import android.support.v7.preference.PreferenceScreen;
import android.text.Html;
import android.text.format.DateFormat;
import android.text.format.DateUtils;
import android.util.Log;
import android.widget.ListView;
import android.widget.Toast;
import com.afollestad.materialdialogs.MaterialDialog;
import com.bytehamster.lib.preferencesearch.SearchConfiguration;
import com.bytehamster.lib.preferencesearch.SearchPreference;
import org.apache.commons.lang3.ArrayUtils;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.GregorianCalendar;
import java.util.List;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.CrashReportWriter;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.activity.AboutActivity;
import de.danoeh.antennapod.activity.DirectoryChooserActivity;
import de.danoeh.antennapod.activity.ImportExportActivity;
import de.danoeh.antennapod.activity.MainActivity;
import de.danoeh.antennapod.activity.MediaplayerActivity;
import de.danoeh.antennapod.activity.OpmlImportFromPathActivity;
import de.danoeh.antennapod.activity.PreferenceActivity;
import de.danoeh.antennapod.activity.StatisticsActivity;
import de.danoeh.antennapod.asynctask.ExportWorker;
import de.danoeh.antennapod.core.export.ExportWriter;
import de.danoeh.antennapod.core.export.html.HtmlWriter;
import de.danoeh.antennapod.core.export.opml.OpmlWriter;
import de.danoeh.antennapod.core.preferences.GpodnetPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.GpodnetSyncService;
import de.danoeh.antennapod.core.util.flattr.FlattrUtils;
import de.danoeh.antennapod.core.util.gui.PictureInPictureUtil;
import de.danoeh.antennapod.dialog.AuthenticationDialog;
import de.danoeh.antennapod.dialog.AutoFlattrPreferenceDialog;
import de.danoeh.antennapod.dialog.ChooseDataFolderDialog;
import de.danoeh.antennapod.dialog.GpodnetSetHostnameDialog;
import de.danoeh.antennapod.dialog.ProxyDialog;
import de.danoeh.antennapod.dialog.VariableSpeedDialog;
import rx.Observable;
import rx.Subscription;
import rx.android.schedulers.AndroidSchedulers;
import rx.schedulers.Schedulers;
import static de.danoeh.antennapod.activity.PreferenceActivity.PARAM_RESOURCE;
/**
* Sets up a preference UI that lets the user change user preferences.
*/
public class PreferenceController implements SharedPreferences.OnSharedPreferenceChangeListener {
private static final String TAG = "PreferenceController";
private static final String PREF_SCREEN_USER_INTERFACE = "prefScreenInterface";
private static final String PREF_SCREEN_PLAYBACK = "prefScreenPlayback";
private static final String PREF_SCREEN_NETWORK = "prefScreenNetwork";
private static final String PREF_SCREEN_INTEGRATIONS = "prefScreenIntegrations";
private static final String PREF_SCREEN_STORAGE = "prefScreenStorage";
private static final String PREF_SCREEN_AUTODL = "prefAutoDownloadSettings";
private static final String PREF_SCREEN_FLATTR = "prefFlattrSettings";
private static final String PREF_SCREEN_GPODDER = "prefGpodderSettings";
private static final String PREF_FLATTR_AUTH = "pref_flattr_authenticate";
private static final String PREF_FLATTR_REVOKE = "prefRevokeAccess";
private static final String PREF_AUTO_FLATTR_PREFS = "prefAutoFlattrPrefs";
private static final String PREF_OPML_EXPORT = "prefOpmlExport";
private static final String PREF_OPML_IMPORT = "prefOpmlImport";
private static final String PREF_HTML_EXPORT = "prefHtmlExport";
private static final String STATISTICS = "statistics";
private static final String IMPORT_EXPORT = "importExport";
private static final String PREF_ABOUT = "prefAbout";
private static final String PREF_CHOOSE_DATA_DIR = "prefChooseDataDir";
private static final String PREF_PLAYBACK_SPEED_LAUNCHER = "prefPlaybackSpeedLauncher";
private static final String PREF_PLAYBACK_REWIND_DELTA_LAUNCHER = "prefPlaybackRewindDeltaLauncher";
private static final String PREF_PLAYBACK_FAST_FORWARD_DELTA_LAUNCHER = "prefPlaybackFastForwardDeltaLauncher";
private static final String PREF_GPODNET_LOGIN = "pref_gpodnet_authenticate";
private static final String PREF_GPODNET_SETLOGIN_INFORMATION = "pref_gpodnet_setlogin_information";
private static final String PREF_GPODNET_SYNC = "pref_gpodnet_sync";
private static final String PREF_GPODNET_FORCE_FULL_SYNC = "pref_gpodnet_force_full_sync";
private static final String PREF_GPODNET_LOGOUT = "pref_gpodnet_logout";
private static final String PREF_GPODNET_HOSTNAME = "pref_gpodnet_hostname";
private static final String PREF_GPODNET_NOTIFICATIONS = "pref_gpodnet_notifications";
private static final String PREF_EXPANDED_NOTIFICATION = "prefExpandNotify";
private static final String PREF_PROXY = "prefProxy";
private static final String PREF_KNOWN_ISSUES = "prefKnownIssues";
private static final String PREF_FAQ = "prefFaq";
private static final String PREF_SEND_CRASH_REPORT = "prefSendCrashReport";
private static final String[] EXTERNAL_STORAGE_PERMISSIONS = {
Manifest.permission.READ_EXTERNAL_STORAGE,
Manifest.permission.WRITE_EXTERNAL_STORAGE };
private static final int PERMISSION_REQUEST_EXTERNAL_STORAGE = 41;
private final PreferenceUI ui;
private final SharedPreferences.OnSharedPreferenceChangeListener gpoddernetListener =
(sharedPreferences, key) -> {
if (GpodnetPreferences.PREF_LAST_SYNC_ATTEMPT_TIMESTAMP.equals(key)) {
updateLastGpodnetSyncReport(GpodnetPreferences.getLastSyncAttemptResult(),
GpodnetPreferences.getLastSyncAttemptTimestamp());
}
};
private CheckBoxPreference[] selectedNetworks;
private Subscription subscription;
public PreferenceController(PreferenceUI ui) {
this.ui = ui;
PreferenceManager.getDefaultSharedPreferences(ui.getActivity().getApplicationContext())
.registerOnSharedPreferenceChangeListener(this);
}
@Override
public void onSharedPreferenceChanged(SharedPreferences sharedPreferences, String key) {
}
public void onCreate(int screen) {
switch (screen) {
case R.xml.preferences:
setupMainScreen();
break;
case R.xml.preferences_network:
setupNetworkScreen();
break;
case R.xml.preferences_autodownload:
setupAutoDownloadScreen();
buildAutodownloadSelectedNetworksPreference();
setSelectedNetworksEnabled(UserPreferences.isEnableAutodownloadWifiFilter());
buildEpisodeCleanupPreference();
break;
case R.xml.preferences_playback:
setupPlaybackScreen();
PreferenceControllerFlavorHelper.setupFlavoredUI(ui);
buildSmartMarkAsPlayedPreference();
break;
case R.xml.preferences_integrations:
setupIntegrationsScreen();
break;
case R.xml.preferences_flattr:
setupFlattrScreen();
break;
case R.xml.preferences_gpodder:
setupGpodderScreen();
break;
case R.xml.preferences_storage:
setupStorageScreen();
break;
case R.xml.preferences_user_interface:
setupInterfaceScreen();
break;
}
}
private void setupInterfaceScreen() {
final Activity activity = ui.getActivity();
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN) {
// disable expanded notification option on unsupported android versions
ui.findPreference(PreferenceController.PREF_EXPANDED_NOTIFICATION).setEnabled(false);
ui.findPreference(PreferenceController.PREF_EXPANDED_NOTIFICATION).setOnPreferenceClickListener(
preference -> {
Toast toast = Toast.makeText(activity,
R.string.pref_expand_notify_unsupport_toast, Toast.LENGTH_SHORT);
toast.show();
return true;
}
);
}
ui.findPreference(UserPreferences.PREF_THEME)
.setOnPreferenceChangeListener(
(preference, newValue) -> {
Intent i = new Intent(activity, MainActivity.class);
i.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK
| Intent.FLAG_ACTIVITY_NEW_TASK);
activity.finish();
activity.startActivity(i);
return true;
}
);
ui.findPreference(UserPreferences.PREF_HIDDEN_DRAWER_ITEMS)
.setOnPreferenceClickListener(preference -> {
showDrawerPreferencesDialog();
return true;
});
ui.findPreference(UserPreferences.PREF_COMPACT_NOTIFICATION_BUTTONS)
.setOnPreferenceClickListener(preference -> {
showNotificationButtonsDialog();
return true;
});
ui.findPreference(UserPreferences.PREF_BACK_BUTTON_BEHAVIOR)
.setOnPreferenceChangeListener((preference, newValue) -> {
if (newValue.equals("page")) {
final Context context = ui.getActivity();
final String[] navTitles = context.getResources().getStringArray(R.array.back_button_go_to_pages);
final String[] navTags = new String[3];
System.arraycopy(MainActivity.NAV_DRAWER_TAGS, 0, navTags, 0, 3);
final String choice[] = { UserPreferences.getBackButtonGoToPage() };
AlertDialog.Builder builder = new AlertDialog.Builder(context);
builder.setTitle(R.string.back_button_go_to_page_title);
builder.setSingleChoiceItems(navTitles, ArrayUtils.indexOf(navTags, UserPreferences.getBackButtonGoToPage()), (dialogInterface, i) -> {
if (i >= 0) {
choice[0] = navTags[i];
}
});
builder.setPositiveButton(R.string.confirm_label, (dialogInterface, i) -> {
if (!choice[0].equals(UserPreferences.getBackButtonGoToPage())) {
UserPreferences.setBackButtonGoToPage(choice[0]);
}
});
builder.setNegativeButton(R.string.cancel_label, null);
builder.create().show();
return true;
} else {
return true;
}
});
if (Build.VERSION.SDK_INT >= 26) {
ui.findPreference(UserPreferences.PREF_EXPANDED_NOTIFICATION).setVisible(false);
}
}
private void setupStorageScreen() {
final Activity activity = ui.getActivity();
ui.findPreference(PreferenceController.IMPORT_EXPORT).setOnPreferenceClickListener(
preference -> {
activity.startActivity(new Intent(activity, ImportExportActivity.class));
return true;
}
);
ui.findPreference(PreferenceController.PREF_OPML_EXPORT).setOnPreferenceClickListener(
preference -> export(new OpmlWriter()));
ui.findPreference(PreferenceController.PREF_HTML_EXPORT).setOnPreferenceClickListener(
preference -> export(new HtmlWriter()));
ui.findPreference(PreferenceController.PREF_OPML_IMPORT).setOnPreferenceClickListener(
preference -> {
activity.startActivity(new Intent(activity, OpmlImportFromPathActivity.class));
return true;
});
ui.findPreference(PreferenceController.PREF_CHOOSE_DATA_DIR).setOnPreferenceClickListener(
preference -> {
if (Build.VERSION_CODES.KITKAT <= Build.VERSION.SDK_INT &&
Build.VERSION.SDK_INT <= Build.VERSION_CODES.LOLLIPOP_MR1) {
showChooseDataFolderDialog();
} else {
int readPermission = ActivityCompat.checkSelfPermission(
activity, Manifest.permission.READ_EXTERNAL_STORAGE);
int writePermission = ActivityCompat.checkSelfPermission(
activity, Manifest.permission.WRITE_EXTERNAL_STORAGE);
if (readPermission == PackageManager.PERMISSION_GRANTED &&
writePermission == PackageManager.PERMISSION_GRANTED) {
openDirectoryChooser();
} else {
requestPermission();
}
}
return true;
}
);
ui.findPreference(PreferenceController.PREF_CHOOSE_DATA_DIR)
.setOnPreferenceClickListener(
preference -> {
if (Build.VERSION.SDK_INT >= 19) {
showChooseDataFolderDialog();
} else {
Intent intent = new Intent(activity, DirectoryChooserActivity.class);
activity.startActivityForResult(intent,
DirectoryChooserActivity.RESULT_CODE_DIR_SELECTED);
}
return true;
}
);
ui.findPreference(UserPreferences.PREF_IMAGE_CACHE_SIZE).setOnPreferenceChangeListener(
(preference, o) -> {
if (o instanceof String) {
int newValue = Integer.parseInt((String) o) * 1024 * 1024;
if (newValue != UserPreferences.getImageCacheSize()) {
AlertDialog.Builder dialog = new AlertDialog.Builder(ui.getActivity());
dialog.setTitle(android.R.string.dialog_alert_title);
dialog.setMessage(R.string.pref_restart_required);
dialog.setPositiveButton(android.R.string.ok, null);
dialog.show();
}
return true;
}
return false;
}
);
}
private void setupIntegrationsScreen() {
final AppCompatActivity activity = ui.getActivity();
ui.findPreference(PREF_SCREEN_FLATTR).setOnPreferenceClickListener(preference -> {
openScreen(R.xml.preferences_flattr, activity);
return true;
});
ui.findPreference(PREF_SCREEN_GPODDER).setOnPreferenceClickListener(preference -> {
openScreen(R.xml.preferences_gpodder, activity);
return true;
});
}
private void setupFlattrScreen() {
final AppCompatActivity activity = ui.getActivity();
ui.findPreference(PreferenceController.PREF_FLATTR_REVOKE).setOnPreferenceClickListener(
preference -> {
FlattrUtils.revokeAccessToken(activity);
checkFlattrItemVisibility();
return true;
}
);
ui.findPreference(PreferenceController.PREF_AUTO_FLATTR_PREFS)
.setOnPreferenceClickListener(preference -> {
AutoFlattrPreferenceDialog.newAutoFlattrPreferenceDialog(activity,
new AutoFlattrPreferenceDialog.AutoFlattrPreferenceDialogInterface() {
@Override
public void onCancelled() {
}
@Override
public void onConfirmed(boolean autoFlattrEnabled, float autoFlattrValue) {
UserPreferences.setAutoFlattrSettings(autoFlattrEnabled, autoFlattrValue);
checkFlattrItemVisibility();
}
});
return true;
});
}
private void setupGpodderScreen() {
final AppCompatActivity activity = ui.getActivity();
ui.findPreference(PreferenceController.PREF_GPODNET_SETLOGIN_INFORMATION)
.setOnPreferenceClickListener(preference -> {
AuthenticationDialog dialog = new AuthenticationDialog(activity,
R.string.pref_gpodnet_setlogin_information_title, false, false, GpodnetPreferences.getUsername(),
null) {
@Override
protected void onConfirmed(String username, String password, boolean saveUsernamePassword) {
GpodnetPreferences.setPassword(password);
}
};
dialog.show();
return true;
});
ui.findPreference(PreferenceController.PREF_GPODNET_SYNC).
setOnPreferenceClickListener(preference -> {
GpodnetSyncService.sendSyncIntent(ui.getActivity().getApplicationContext());
Toast toast = Toast.makeText(ui.getActivity(), R.string.pref_gpodnet_sync_started,
Toast.LENGTH_SHORT);
toast.show();
return true;
});
ui.findPreference(PreferenceController.PREF_GPODNET_FORCE_FULL_SYNC).
setOnPreferenceClickListener(preference -> {
GpodnetPreferences.setLastSubscriptionSyncTimestamp(0L);
GpodnetPreferences.setLastEpisodeActionsSyncTimestamp(0L);
GpodnetPreferences.setLastSyncAttempt(false, 0);
updateLastGpodnetSyncReport(false, 0);
GpodnetSyncService.sendSyncIntent(ui.getActivity().getApplicationContext());
Toast toast = Toast.makeText(ui.getActivity(), R.string.pref_gpodnet_sync_started,
Toast.LENGTH_SHORT);
toast.show();
return true;
});
ui.findPreference(PreferenceController.PREF_GPODNET_LOGOUT).setOnPreferenceClickListener(
preference -> {
GpodnetPreferences.logout();
Toast toast = Toast.makeText(activity, R.string.pref_gpodnet_logout_toast, Toast.LENGTH_SHORT);
toast.show();
updateGpodnetPreferenceScreen();
return true;
});
ui.findPreference(PreferenceController.PREF_GPODNET_HOSTNAME).setOnPreferenceClickListener(
preference -> {
GpodnetSetHostnameDialog.createDialog(activity).setOnDismissListener(dialog -> updateGpodnetPreferenceScreen());
return true;
});
}
private void setupPlaybackScreen() {
final Activity activity = ui.getActivity();
ui.findPreference(PreferenceController.PREF_PLAYBACK_SPEED_LAUNCHER)
.setOnPreferenceClickListener(preference -> {
VariableSpeedDialog.showDialog(activity);
return true;
});
ui.findPreference(PreferenceController.PREF_PLAYBACK_REWIND_DELTA_LAUNCHER)
.setOnPreferenceClickListener(preference -> {
MediaplayerActivity.showSkipPreference(activity, MediaplayerActivity.SkipDirection.SKIP_REWIND);
return true;
});
ui.findPreference(PreferenceController.PREF_PLAYBACK_FAST_FORWARD_DELTA_LAUNCHER)
.setOnPreferenceClickListener(preference -> {
MediaplayerActivity.showSkipPreference(activity, MediaplayerActivity.SkipDirection.SKIP_FORWARD);
return true;
});
if (!PictureInPictureUtil.supportsPictureInPicture(activity)) {
ListPreference behaviour = (ListPreference) ui.findPreference(UserPreferences.PREF_VIDEO_BEHAVIOR);
behaviour.setEntries(R.array.video_background_behavior_options_without_pip);
behaviour.setEntryValues(R.array.video_background_behavior_values_without_pip);
}
}
private void setupAutoDownloadScreen() {
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL).setOnPreferenceChangeListener(
(preference, newValue) -> {
if (newValue instanceof Boolean) {
checkAutodownloadItemVisibility((Boolean) newValue);
}
return true;
});
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL_WIFI_FILTER)
.setOnPreferenceChangeListener(
(preference, newValue) -> {
if (newValue instanceof Boolean) {
setSelectedNetworksEnabled((Boolean) newValue);
return true;
} else {
return false;
}
}
);
ui.findPreference(UserPreferences.PREF_EPISODE_CACHE_SIZE)
.setOnPreferenceChangeListener(
(preference, o) -> {
if (o instanceof String) {
setEpisodeCacheSizeText(UserPreferences.readEpisodeCacheSize((String) o));
}
return true;
}
);
}
private void setupNetworkScreen() {
final AppCompatActivity activity = ui.getActivity();
ui.findPreference(PREF_SCREEN_AUTODL).setOnPreferenceClickListener(preference -> {
openScreen(R.xml.preferences_autodownload, activity);
return true;
});
ui.findPreference(UserPreferences.PREF_UPDATE_INTERVAL)
.setOnPreferenceClickListener(preference -> {
showUpdateIntervalTimePreferencesDialog();
return true;
});
ui.findPreference(UserPreferences.PREF_PARALLEL_DOWNLOADS)
.setOnPreferenceChangeListener(
(preference, o) -> {
if (o instanceof Integer) {
setParallelDownloadsText((Integer) o);
}
return true;
}
);
// The parallel downloads value set above is validated elsewhere so that it stays between 1 and 50 (inclusive).
ui.findPreference(PREF_PROXY).setOnPreferenceClickListener(preference -> {
ProxyDialog dialog = new ProxyDialog(ui.getActivity());
dialog.createDialog().show();
return true;
});
}
private void setupMainScreen() {
final AppCompatActivity activity = ui.getActivity();
setupSearch();
ui.findPreference(PREF_SCREEN_USER_INTERFACE).setOnPreferenceClickListener(preference -> {
openScreen(R.xml.preferences_user_interface, activity);
return true;
});
ui.findPreference(PREF_SCREEN_PLAYBACK).setOnPreferenceClickListener(preference -> {
openScreen(R.xml.preferences_playback, activity);
return true;
});
ui.findPreference(PREF_SCREEN_NETWORK).setOnPreferenceClickListener(preference -> {
openScreen(R.xml.preferences_network, activity);
return true;
});
ui.findPreference(PREF_SCREEN_INTEGRATIONS).setOnPreferenceClickListener(preference -> {
openScreen(R.xml.preferences_integrations, activity);
return true;
});
ui.findPreference(PREF_SCREEN_STORAGE).setOnPreferenceClickListener(preference -> {
openScreen(R.xml.preferences_storage, activity);
return true;
});
ui.findPreference(PreferenceController.PREF_ABOUT).setOnPreferenceClickListener(
preference -> {
activity.startActivity(new Intent(activity, AboutActivity.class));
return true;
}
);
ui.findPreference(PreferenceController.STATISTICS).setOnPreferenceClickListener(
preference -> {
activity.startActivity(new Intent(activity, StatisticsActivity.class));
return true;
}
);
ui.findPreference(PREF_KNOWN_ISSUES).setOnPreferenceClickListener(preference -> {
openInBrowser("https://github.com/AntennaPod/AntennaPod/labels/bug");
return true;
});
ui.findPreference(PREF_FAQ).setOnPreferenceClickListener(preference -> {
openInBrowser("http://antennapod.org/faq.html");
return true;
});
ui.findPreference(PREF_SEND_CRASH_REPORT).setOnPreferenceClickListener(preference -> {
Context context = ui.getActivity().getApplicationContext();
Intent emailIntent = new Intent(Intent.ACTION_SEND);
emailIntent.setType("text/plain");
emailIntent.putExtra(Intent.EXTRA_EMAIL, new String[]{"[email protected]"});
emailIntent.putExtra(Intent.EXTRA_SUBJECT, "AntennaPod Crash Report");
emailIntent.putExtra(Intent.EXTRA_TEXT, "Please describe what you were doing when the app crashed");
// Attach the crash report file via a content URI obtained from FileProvider.
Uri fileUri = FileProvider.getUriForFile(context, context.getString(R.string.provider_authority),
CrashReportWriter.getFile());
emailIntent.putExtra(Intent.EXTRA_STREAM, fileUri);
emailIntent.setFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION);
String intentTitle = ui.getActivity().getString(R.string.send_email);
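// On KitKat and older the read-permission flag on the intent may not reach the
// app picked in the chooser, so the URI permission is granted explicitly to
// every activity that can resolve the intent.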
if (Build.VERSION.SDK_INT <= Build.VERSION_CODES.KITKAT) {
List<ResolveInfo> resInfoList = context.getPackageManager().queryIntentActivities(emailIntent, PackageManager.MATCH_DEFAULT_ONLY);
for (ResolveInfo resolveInfo : resInfoList) {
String packageName = resolveInfo.activityInfo.packageName;
context.grantUriPermission(packageName, fileUri, Intent.FLAG_GRANT_READ_URI_PERMISSION);
}
}
ui.getActivity().startActivity(Intent.createChooser(emailIntent, intentTitle));
return true;
});
}
private void setupSearch() {
final AppCompatActivity activity = ui.getActivity();
SearchPreference searchPreference = (SearchPreference) ui.findPreference("searchPreference");
SearchConfiguration config = searchPreference.getSearchConfiguration();
config.setActivity(activity);
config.setFragmentContainerViewId(R.id.content);
config.setBreadcrumbsEnabled(true);
config.index()
.addBreadcrumb(getTitleOfPage(R.xml.preferences_user_interface))
.addFile(R.xml.preferences_user_interface);
config.index()
.addBreadcrumb(getTitleOfPage(R.xml.preferences_playback))
.addFile(R.xml.preferences_playback);
config.index()
.addBreadcrumb(getTitleOfPage(R.xml.preferences_network))
.addFile(R.xml.preferences_network);
config.index()
.addBreadcrumb(getTitleOfPage(R.xml.preferences_storage))
.addFile(R.xml.preferences_storage);
config.index()
.addBreadcrumb(getTitleOfPage(R.xml.preferences_network))
.addBreadcrumb(R.string.automation)
.addBreadcrumb(getTitleOfPage(R.xml.preferences_autodownload))
.addFile(R.xml.preferences_autodownload);
config.index()
.addBreadcrumb(getTitleOfPage(R.xml.preferences_integrations))
.addBreadcrumb(getTitleOfPage(R.xml.preferences_gpodder))
.addFile(R.xml.preferences_gpodder);
config.index()
.addBreadcrumb(getTitleOfPage(R.xml.preferences_integrations))
.addBreadcrumb(getTitleOfPage(R.xml.preferences_flattr))
.addFile(R.xml.preferences_flattr);
}
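/**
 * Opens a nested preference screen by swapping in a new MainFragment
 * configured with the given preference resource and pushing it onto the
 * back stack.
 */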
public PreferenceFragmentCompat openScreen(int preferences, AppCompatActivity activity) {
PreferenceFragmentCompat prefFragment = new PreferenceActivity.MainFragment();
Bundle args = new Bundle();
args.putInt(PARAM_RESOURCE, preferences);
prefFragment.setArguments(args);
activity.getSupportFragmentManager().beginTransaction()
.replace(R.id.content, prefFragment)
.addToBackStack(TAG).commit();
return prefFragment;
}
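/**
 * Maps a preference XML resource to the string resource used as its screen
 * title, falling back to the generic settings label.
 */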
public static int getTitleOfPage(int preferences) {
switch (preferences) {
case R.xml.preferences_network:
return R.string.network_pref;
case R.xml.preferences_autodownload:
return R.string.pref_automatic_download_title;
case R.xml.preferences_playback:
return R.string.playback_pref;
case R.xml.preferences_storage:
return R.string.storage_pref;
case R.xml.preferences_user_interface:
return R.string.user_interface_label;
case R.xml.preferences_integrations:
return R.string.integrations_label;
case R.xml.preferences_flattr:
return R.string.flattr_label;
case R.xml.preferences_gpodder:
return R.string.gpodnet_main_label;
default:
return R.string.settings_label;
}
}
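/**
 * Runs the given ExportWriter on a background thread while showing a progress
 * dialog, then offers to share the resulting file via a send intent.
 */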
private boolean export(ExportWriter exportWriter) {
Context context = ui.getActivity();
final ProgressDialog progressDialog = new ProgressDialog(context);
progressDialog.setMessage(context.getString(R.string.exporting_label));
progressDialog.setIndeterminate(true);
progressDialog.show();
final AlertDialog.Builder alert = new AlertDialog.Builder(context)
.setNeutralButton(android.R.string.ok, (dialog, which) -> dialog.dismiss());
Observable<File> observable = new ExportWorker(exportWriter).exportObservable();
subscription = observable.subscribeOn(Schedulers.newThread())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(output -> {
alert.setTitle(R.string.export_success_title);
String message = context.getString(R.string.export_success_sum, output.toString());
alert.setMessage(message);
alert.setPositiveButton(R.string.send_label, (dialog, which) -> {
Uri fileUri = FileProvider.getUriForFile(context.getApplicationContext(),
"de.danoeh.antennapod.provider", output);
Intent sendIntent = new Intent(Intent.ACTION_SEND);
sendIntent.putExtra(Intent.EXTRA_SUBJECT,
context.getResources().getText(R.string.opml_export_label));
sendIntent.putExtra(Intent.EXTRA_STREAM, fileUri);
sendIntent.setType("text/plain");
sendIntent.addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION);
if (Build.VERSION.SDK_INT <= Build.VERSION_CODES.KITKAT) {
List<ResolveInfo> resInfoList = context.getPackageManager().queryIntentActivities(sendIntent, PackageManager.MATCH_DEFAULT_ONLY);
for (ResolveInfo resolveInfo : resInfoList) {
String packageName = resolveInfo.activityInfo.packageName;
context.grantUriPermission(packageName, fileUri, Intent.FLAG_GRANT_READ_URI_PERMISSION);
}
}
context.startActivity(Intent.createChooser(sendIntent,
context.getResources().getText(R.string.send_label)));
});
alert.create().show();
}, error -> {
alert.setTitle(R.string.export_error_label);
alert.setMessage(error.getMessage());
alert.show();
}, progressDialog::dismiss);
return true;
}
private void openInBrowser(String url) {
try {
Intent myIntent = new Intent(Intent.ACTION_VIEW, Uri.parse(url));
ui.getActivity().startActivity(myIntent);
} catch (ActivityNotFoundException e) {
Toast.makeText(ui.getActivity(), R.string.pref_no_browser_found, Toast.LENGTH_LONG).show();
Log.e(TAG, Log.getStackTraceString(e));
}
}
public void onResume(int screen) {
switch (screen) {
case R.xml.preferences_network:
setUpdateIntervalText();
setParallelDownloadsText(UserPreferences.getParallelDownloads());
break;
case R.xml.preferences_autodownload:
setEpisodeCacheSizeText(UserPreferences.getEpisodeCacheSize());
checkAutodownloadItemVisibility(UserPreferences.isEnableAutodownload());
break;
case R.xml.preferences_storage:
setDataFolderText();
break;
case R.xml.preferences_integrations:
setIntegrationsItemVisibility();
return;
case R.xml.preferences_flattr:
checkFlattrItemVisibility();
break;
case R.xml.preferences_gpodder:
GpodnetPreferences.registerOnSharedPreferenceChangeListener(gpoddernetListener);
updateGpodnetPreferenceScreen();
break;
case R.xml.preferences_playback:
checkSonicItemVisibility();
break;
}
}
public void unregisterGpodnet() {
GpodnetPreferences.unregisterOnSharedPreferenceChangeListener(gpoddernetListener);
}
public void unsubscribeExportSubscription() {
if (subscription != null) {
subscription.unsubscribe();
}
}
@SuppressLint("NewApi")
public void onActivityResult(int requestCode, int resultCode, Intent data) {
if (resultCode == Activity.RESULT_OK &&
requestCode == DirectoryChooserActivity.RESULT_CODE_DIR_SELECTED) {
String dir = data.getStringExtra(DirectoryChooserActivity.RESULT_SELECTED_DIR);
File path;
if(dir != null) {
path = new File(dir);
} else {
path = ui.getActivity().getExternalFilesDir(null);
}
String message = null;
final Context context = ui.getActivity().getApplicationContext();
if(!path.exists()) {
message = String.format(context.getString(R.string.folder_does_not_exist_error), dir);
} else if(!path.canRead()) {
message = String.format(context.getString(R.string.folder_not_readable_error), dir);
} else if(!path.canWrite()) {
message = String.format(context.getString(R.string.folder_not_writable_error), dir);
}
if(message == null) {
Log.d(TAG, "Setting data folder: " + dir);
UserPreferences.setDataFolder(dir);
setDataFolderText();
} else {
AlertDialog.Builder ab = new AlertDialog.Builder(ui.getActivity());
ab.setMessage(message);
ab.setPositiveButton(android.R.string.ok, null);
ab.show();
}
}
}
private void updateGpodnetPreferenceScreen() {
final boolean loggedIn = GpodnetPreferences.loggedIn();
ui.findPreference(PreferenceController.PREF_GPODNET_LOGIN).setEnabled(!loggedIn);
ui.findPreference(PreferenceController.PREF_GPODNET_SETLOGIN_INFORMATION).setEnabled(loggedIn);
ui.findPreference(PreferenceController.PREF_GPODNET_SYNC).setEnabled(loggedIn);
ui.findPreference(PreferenceController.PREF_GPODNET_FORCE_FULL_SYNC).setEnabled(loggedIn);
ui.findPreference(PreferenceController.PREF_GPODNET_LOGOUT).setEnabled(loggedIn);
ui.findPreference(PREF_GPODNET_NOTIFICATIONS).setEnabled(loggedIn);
if(loggedIn) {
String format = ui.getActivity().getString(R.string.pref_gpodnet_login_status);
String summary = String.format(format, GpodnetPreferences.getUsername(),
GpodnetPreferences.getDeviceID());
ui.findPreference(PreferenceController.PREF_GPODNET_LOGOUT).setSummary(Html.fromHtml(summary));
updateLastGpodnetSyncReport(GpodnetPreferences.getLastSyncAttemptResult(),
GpodnetPreferences.getLastSyncAttemptTimestamp());
} else {
ui.findPreference(PreferenceController.PREF_GPODNET_LOGOUT).setSummary(null);
updateLastGpodnetSyncReport(false, 0);
}
ui.findPreference(PreferenceController.PREF_GPODNET_HOSTNAME).setSummary(GpodnetPreferences.getHostname());
}
private void updateLastGpodnetSyncReport(boolean successful, long lastTime) {
Preference sync = ui.findPreference(PREF_GPODNET_SYNC);
if (lastTime != 0) {
sync.setSummary(ui.getActivity().getString(R.string.pref_gpodnet_sync_changes_sum) + "\n" +
ui.getActivity().getString(R.string.pref_gpodnet_sync_sum_last_sync_line,
ui.getActivity().getString(successful ?
R.string.gpodnetsync_pref_report_successful :
R.string.gpodnetsync_pref_report_failed),
DateUtils.getRelativeDateTimeString(ui.getActivity(),
lastTime,
DateUtils.MINUTE_IN_MILLIS,
DateUtils.WEEK_IN_MILLIS,
DateUtils.FORMAT_SHOW_TIME)));
} else {
sync.setSummary(ui.getActivity().getString(R.string.pref_gpodnet_sync_changes_sum));
}
}
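/**
 * Builds human-readable labels for the update interval values, treating 0 as
 * "manual refresh only" and using singular/plural hour strings otherwise.
 */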
private String[] getUpdateIntervalEntries(final String[] values) {
final Resources res = ui.getActivity().getResources();
String[] entries = new String[values.length];
for (int x = 0; x < values.length; x++) {
Integer v = Integer.parseInt(values[x]);
switch (v) {
case 0:
entries[x] = res.getString(R.string.pref_update_interval_hours_manual);
break;
case 1:
entries[x] = v + " " + res.getString(R.string.pref_update_interval_hours_singular);
break;
default:
entries[x] = v + " " + res.getString(R.string.pref_update_interval_hours_plural);
break;
}
}
return entries;
}
private void buildEpisodeCleanupPreference() {
final Resources res = ui.getActivity().getResources();
ListPreference pref = (ListPreference) ui.findPreference(UserPreferences.PREF_EPISODE_CLEANUP);
String[] values = res.getStringArray(
R.array.episode_cleanup_values);
String[] entries = new String[values.length];
for (int x = 0; x < values.length; x++) {
int v = Integer.parseInt(values[x]);
if (v == UserPreferences.EPISODE_CLEANUP_QUEUE) {
entries[x] = res.getString(R.string.episode_cleanup_queue_removal);
} else if (v == UserPreferences.EPISODE_CLEANUP_NULL){
entries[x] = res.getString(R.string.episode_cleanup_never);
} else if (v == 0) {
entries[x] = res.getString(R.string.episode_cleanup_after_listening);
} else {
entries[x] = res.getQuantityString(R.plurals.episode_cleanup_days_after_listening, v, v);
}
}
pref.setEntries(entries);
}
private void buildSmartMarkAsPlayedPreference() {
final Resources res = ui.getActivity().getResources();
ListPreference pref = (ListPreference) ui.findPreference(UserPreferences.PREF_SMART_MARK_AS_PLAYED_SECS);
String[] values = res.getStringArray(R.array.smart_mark_as_played_values);
String[] entries = new String[values.length];
for (int x = 0; x < values.length; x++) {
if(x == 0) {
entries[x] = res.getString(R.string.pref_smart_mark_as_played_disabled);
} else {
Integer v = Integer.parseInt(values[x]);
if(v < 60) {
entries[x] = res.getQuantityString(R.plurals.time_seconds_quantified, v, v);
} else {
v /= 60;
entries[x] = res.getQuantityString(R.plurals.time_minutes_quantified, v, v);
}
}
}
pref.setEntries(entries);
}
private void setSelectedNetworksEnabled(boolean b) {
if (selectedNetworks != null) {
for (Preference p : selectedNetworks) {
p.setEnabled(b);
}
}
}
private void setIntegrationsItemVisibility() {
ui.findPreference(PreferenceController.PREF_SCREEN_FLATTR).setEnabled(FlattrUtils.hasAPICredentials());
}
@SuppressWarnings("deprecation")
private void checkFlattrItemVisibility() {
boolean hasFlattrToken = FlattrUtils.hasToken();
ui.findPreference(PreferenceController.PREF_FLATTR_AUTH).setEnabled(!hasFlattrToken);
ui.findPreference(PreferenceController.PREF_FLATTR_REVOKE).setEnabled(hasFlattrToken);
ui.findPreference(PreferenceController.PREF_AUTO_FLATTR_PREFS).setEnabled(hasFlattrToken);
}
private void checkAutodownloadItemVisibility(boolean autoDownload) {
ui.findPreference(UserPreferences.PREF_EPISODE_CACHE_SIZE).setEnabled(autoDownload);
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL_ON_BATTERY).setEnabled(autoDownload);
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL_WIFI_FILTER).setEnabled(autoDownload);
ui.findPreference(UserPreferences.PREF_EPISODE_CLEANUP).setEnabled(autoDownload);
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL_ON_MOBILE).setEnabled(autoDownload);
setSelectedNetworksEnabled(autoDownload && UserPreferences.isEnableAutodownloadWifiFilter());
}
private void checkSonicItemVisibility() {
if (Build.VERSION.SDK_INT < 16) {
ListPreference p = (ListPreference) ui.findPreference(UserPreferences.PREF_MEDIA_PLAYER);
p.setEntries(R.array.media_player_options_no_sonic);
p.setEntryValues(R.array.media_player_values_no_sonic);
}
}
private void setUpdateIntervalText() {
Context context = ui.getActivity().getApplicationContext();
String val;
long interval = UserPreferences.getUpdateInterval();
if(interval > 0) {
int hours = (int) TimeUnit.MILLISECONDS.toHours(interval);
String hoursStr = context.getResources().getQuantityString(R.plurals.time_hours_quantified, hours, hours);
val = String.format(context.getString(R.string.pref_autoUpdateIntervallOrTime_every), hoursStr);
} else {
int[] timeOfDay = UserPreferences.getUpdateTimeOfDay();
if(timeOfDay.length == 2) {
Calendar cal = new GregorianCalendar();
cal.set(Calendar.HOUR_OF_DAY, timeOfDay[0]);
cal.set(Calendar.MINUTE, timeOfDay[1]);
String timeOfDayStr = DateFormat.getTimeFormat(context).format(cal.getTime());
val = String.format(context.getString(R.string.pref_autoUpdateIntervallOrTime_at),
timeOfDayStr);
} else {
val = context.getString(R.string.pref_smart_mark_as_played_disabled); // TODO: Is this a bug? Otherwise document why is this related to smart mark???
}
}
String summary = context.getString(R.string.pref_autoUpdateIntervallOrTime_sum) + "\n"
+ String.format(context.getString(R.string.pref_current_value), val);
ui.findPreference(UserPreferences.PREF_UPDATE_INTERVAL).setSummary(summary);
}
private void setParallelDownloadsText(int downloads) {
final Resources res = ui.getActivity().getResources();
String s = Integer.toString(downloads)
+ res.getString(R.string.parallel_downloads_suffix);
ui.findPreference(UserPreferences.PREF_PARALLEL_DOWNLOADS).setSummary(s);
}
private void setEpisodeCacheSizeText(int cacheSize) {
final Resources res = ui.getActivity().getResources();
String s;
if (cacheSize == res.getInteger(
R.integer.episode_cache_size_unlimited)) {
s = res.getString(R.string.pref_episode_cache_unlimited);
} else {
s = Integer.toString(cacheSize)
+ res.getString(R.string.episodes_suffix);
}
ui.findPreference(UserPreferences.PREF_EPISODE_CACHE_SIZE).setSummary(s);
}
private void setDataFolderText() {
File f = UserPreferences.getDataFolder(null);
if (f != null) {
ui.findPreference(PreferenceController.PREF_CHOOSE_DATA_DIR)
.setSummary(f.getAbsolutePath());
}
}
private static String blankIfNull(String val) {
return val == null ? "" : val;
}
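/**
 * Creates one non-persistent CheckBoxPreference per configured Wi-Fi network,
 * keyed by network id, and keeps the selected-network list in UserPreferences
 * in sync when a box is toggled.
 */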
private void buildAutodownloadSelectedNetworksPreference() {
final Activity activity = ui.getActivity();
if (selectedNetworks != null) {
clearAutodownloadSelectedNetworksPreference();
}
// get configured networks
WifiManager wifiservice = (WifiManager) activity.getApplicationContext().getSystemService(Context.WIFI_SERVICE);
List<WifiConfiguration> networks = wifiservice.getConfiguredNetworks();
if (networks == null) {
Log.e(TAG, "Couldn't get list of configure Wi-Fi networks");
return;
}
Collections.sort(networks, (x, y) ->
blankIfNull(x.SSID).compareTo(blankIfNull(y.SSID)));
selectedNetworks = new CheckBoxPreference[networks.size()];
List<String> prefValues = Arrays.asList(UserPreferences
.getAutodownloadSelectedNetworks());
PreferenceScreen prefScreen = ui.getPreferenceScreen();
Preference.OnPreferenceClickListener clickListener = preference -> {
if (preference instanceof CheckBoxPreference) {
String key = preference.getKey();
List<String> prefValuesList = new ArrayList<>(
Arrays.asList(UserPreferences
.getAutodownloadSelectedNetworks())
);
boolean newValue = ((CheckBoxPreference) preference)
.isChecked();
Log.d(TAG, "Selected network " + key + ". New state: " + newValue);
int index = prefValuesList.indexOf(key);
if (index >= 0 && !newValue) {
// remove network
prefValuesList.remove(index);
} else if (index < 0 && newValue) {
prefValuesList.add(key);
}
UserPreferences.setAutodownloadSelectedNetworks(
prefValuesList.toArray(new String[prefValuesList.size()])
);
return true;
} else {
return false;
}
};
// Create a preference for each known network, attach the click listener,
// and set its initial checked state from the stored selection.
for (int i = 0; i < networks.size(); i++) {
WifiConfiguration config = networks.get(i);
CheckBoxPreference pref = new CheckBoxPreference(activity);
String key = Integer.toString(config.networkId);
pref.setTitle(config.SSID);
pref.setKey(key);
pref.setOnPreferenceClickListener(clickListener);
pref.setPersistent(false);
pref.setChecked(prefValues.contains(key));
selectedNetworks[i] = pref;
prefScreen.addPreference(pref);
}
}
private void clearAutodownloadSelectedNetworksPreference() {
if (selectedNetworks != null) {
PreferenceScreen prefScreen = ui.getPreferenceScreen();
for (CheckBoxPreference network : selectedNetworks) {
if (network != null) {
prefScreen.removePreference(network);
}
}
}
}
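/**
 * Shows a multi-choice dialog that lets the user hide or show individual
 * navigation drawer items; the selection is persisted on confirmation.
 */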
private void showDrawerPreferencesDialog() {
final Context context = ui.getActivity();
final List<String> hiddenDrawerItems = UserPreferences.getHiddenDrawerItems();
final String[] navTitles = context.getResources().getStringArray(R.array.nav_drawer_titles);
final String[] NAV_DRAWER_TAGS = MainActivity.NAV_DRAWER_TAGS;
boolean[] checked = new boolean[MainActivity.NAV_DRAWER_TAGS.length];
for(int i=0; i < NAV_DRAWER_TAGS.length; i++) {
String tag = NAV_DRAWER_TAGS[i];
if(!hiddenDrawerItems.contains(tag)) {
checked[i] = true;
}
}
AlertDialog.Builder builder = new AlertDialog.Builder(context);
builder.setTitle(R.string.drawer_preferences);
builder.setMultiChoiceItems(navTitles, checked, (dialog, which, isChecked) -> {
if (isChecked) {
hiddenDrawerItems.remove(NAV_DRAWER_TAGS[which]);
} else {
hiddenDrawerItems.add(NAV_DRAWER_TAGS[which]);
}
});
builder.setPositiveButton(R.string.confirm_label, (dialog, which) ->
UserPreferences.setHiddenDrawerItems(hiddenDrawerItems));
builder.setNegativeButton(R.string.cancel_label, null);
builder.create().show();
}
private void showNotificationButtonsDialog() {
final Context context = ui.getActivity();
final List<Integer> preferredButtons = UserPreferences.getCompactNotificationButtons();
final String[] allButtonNames = context.getResources().getStringArray(
R.array.compact_notification_buttons_options);
boolean[] checked = new boolean[allButtonNames.length]; // booleans default to false in java
for(int i=0; i < checked.length; i++) {
if(preferredButtons.contains(i)) {
checked[i] = true;
}
}
AlertDialog.Builder builder = new AlertDialog.Builder(context);
builder.setTitle(String.format(context.getResources().getString(
R.string.pref_compact_notification_buttons_dialog_title), 2));
builder.setMultiChoiceItems(allButtonNames, checked, (dialog, which, isChecked) -> {
checked[which] = isChecked;
if (isChecked) {
if (preferredButtons.size() < 2) {
preferredButtons.add(which);
} else {
// Only allow a maximum of two selections. This is because the notification
// on the lock screen can only display 3 buttons, and the play/pause button
// is always included.
checked[which] = false;
ListView selectionView = ((AlertDialog) dialog).getListView();
selectionView.setItemChecked(which, false);
Snackbar.make(
selectionView,
String.format(context.getResources().getString(
R.string.pref_compact_notification_buttons_dialog_error), 2),
Snackbar.LENGTH_SHORT).show();
}
} else {
preferredButtons.remove((Integer) which);
}
});
builder.setPositiveButton(R.string.confirm_label, (dialog, which) ->
UserPreferences.setCompactNotificationButtons(preferredButtons));
builder.setNegativeButton(R.string.cancel_label, null);
builder.create().show();
}
// CHOOSE DATA FOLDER
private void requestPermission() {
ActivityCompat.requestPermissions(ui.getActivity(), EXTERNAL_STORAGE_PERMISSIONS,
PERMISSION_REQUEST_EXTERNAL_STORAGE);
}
private void openDirectoryChooser() {
Activity activity = ui.getActivity();
Intent intent = new Intent(activity, DirectoryChooserActivity.class);
activity.startActivityForResult(intent, DirectoryChooserActivity.RESULT_CODE_DIR_SELECTED);
}
private void showChooseDataFolderDialog() {
ChooseDataFolderDialog.showDialog(
ui.getActivity(), new ChooseDataFolderDialog.RunnableWithString() {
@Override
public void run(final String folder) {
UserPreferences.setDataFolder(folder);
setDataFolderText();
}
});
}
// UPDATE TIME/INTERVAL DIALOG
private void showUpdateIntervalTimePreferencesDialog() {
final Context context = ui.getActivity();
MaterialDialog.Builder builder = new MaterialDialog.Builder(context);
builder.title(R.string.pref_autoUpdateIntervallOrTime_title);
builder.content(R.string.pref_autoUpdateIntervallOrTime_message);
builder.positiveText(R.string.pref_autoUpdateIntervallOrTime_Interval);
builder.negativeText(R.string.pref_autoUpdateIntervallOrTime_TimeOfDay);
builder.neutralText(R.string.pref_autoUpdateIntervallOrTime_Disable);
builder.onPositive((dialog, which) -> {
AlertDialog.Builder builder1 = new AlertDialog.Builder(context);
builder1.setTitle(context.getString(R.string.pref_autoUpdateIntervallOrTime_Interval));
final String[] values = context.getResources().getStringArray(R.array.update_intervall_values);
final String[] entries = getUpdateIntervalEntries(values);
long currInterval = UserPreferences.getUpdateInterval();
int checkedItem = -1;
if(currInterval > 0) {
String currIntervalStr = String.valueOf(TimeUnit.MILLISECONDS.toHours(currInterval));
checkedItem = ArrayUtils.indexOf(values, currIntervalStr);
}
builder1.setSingleChoiceItems(entries, checkedItem, (dialog1, which1) -> {
int hours = Integer.parseInt(values[which1]);
UserPreferences.setUpdateInterval(hours);
dialog1.dismiss();
setUpdateIntervalText();
});
builder1.setNegativeButton(context.getString(R.string.cancel_label), null);
builder1.show();
});
builder.onNegative((dialog, which) -> {
int hourOfDay = 7, minute = 0;
int[] updateTime = UserPreferences.getUpdateTimeOfDay();
if (updateTime.length == 2) {
hourOfDay = updateTime[0];
minute = updateTime[1];
}
TimePickerDialog timePickerDialog = new TimePickerDialog(context,
(view, selectedHourOfDay, selectedMinute) -> {
if (view.getTag() == null) { // onTimeSet() may get called twice!
view.setTag("TAGGED");
UserPreferences.setUpdateTimeOfDay(selectedHourOfDay, selectedMinute);
setUpdateIntervalText();
}
}, hourOfDay, minute, DateFormat.is24HourFormat(context));
timePickerDialog.setTitle(context.getString(R.string.pref_autoUpdateIntervallOrTime_TimeOfDay));
timePickerDialog.show();
});
builder.onNeutral((dialog, which) -> {
UserPreferences.setUpdateInterval(0);
setUpdateIntervalText();
});
builder.show();
}
public interface PreferenceUI {
void setFragment(PreferenceFragmentCompat fragment);
PreferenceFragmentCompat getFragment();
/**
* Finds a preference based on its key.
*/
Preference findPreference(CharSequence key);
PreferenceScreen getPreferenceScreen();
AppCompatActivity getActivity();
}
}
| 1 | 14,210 | There is a doubled semicolon ;) | AntennaPod-AntennaPod | java |
@@ -40,6 +40,10 @@
#include <math.h>
+#include <zlib.h>
+#include "openssl/md5.h"
+#include <openssl/sha.h>
+
#define MathSqrt(op, err) sqrt(op)
#include <ctype.h> | 1 | /*********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: <file>
* Description:
*
*
* Created: 7/10/95
* Language: C++
*
*
*
*
*****************************************************************************
*/
#include "Platform.h"
#include <math.h>
#define MathSqrt(op, err) sqrt(op)
#include <ctype.h>
#include <string.h>
#include <stdio.h>
#include "NLSConversion.h"
#include "nawstring.h"
#include "exp_stdh.h"
#include "exp_clause_derived.h"
#include "exp_function.h"
#include "ComDefs.h"
#include "SQLTypeDefs.h"
#include "exp_datetime.h"
#include "exp_interval.h"
#include "exp_bignum.h"
#include "ComSysUtils.h"
#include "wstr.h"
#include "ComDiags.h"
#include "ComAnsiNamePart.h"
#include "ComSqlId.h"
#include "ex_globals.h"
#include "NAUserId.h"
#include "ComUser.h"
#include "ExpSeqGen.h"
#undef DllImport
#define DllImport __declspec ( dllimport )
#include "rosetta/rosgen.h"
#define ptimez_h_juliantimestamp
#define ptimez_h_including_section
#include "guardian/ptimez.h"
#ifdef ptimez_h_juliantimestamp
Section missing, generate compiler error
#endif
#define ptimez_h_converttimestamp
#define ptimez_h_including_section
#include "guardian/ptimez.h"
#ifdef ptimez_h_converttimestamp
Section missing, generate compiler error
#endif
#define ptimez_h_interprettimestamp
#define ptimez_h_including_section
#include "guardian/ptimez.h"
#ifdef ptimez_h_interprettimestamp
Section missing, generate compiler error
#endif
#define ptimez_h_computetimestamp
#define ptimez_h_including_section
#include "guardian/ptimez.h"
#ifdef ptimez_h_computetimestamp
Section missing, generate compiler error
#endif
#define psecure_h_including_section
#define psecure_h_security_app_priv_
#define psecure_h_security_psb_get_
#define psecure_h_security_ntuser_set_
#include "security/psecure.h"
#ifndef dsecure_h_INCLUDED
#define dsecure_h_INCLUDED
#include "security/dsecure.h"
#endif
#include "security/uid.h"
#include "security/uid.h"
#include "fs/feerrors.h"
extern char * exClauseGetText(OperatorTypeEnum ote);
NA_EIDPROC
void setVCLength(char * VCLen, Lng32 VCLenSize, ULng32 value);
//#define TOUPPER(c) (((c >= 'a') && (c <= 'z')) ? (c - 32) : c);
//#define TOLOWER(c) (((c >= 'A') && (c <= 'Z')) ? (c + 32) : c);
// -----------------------------------------------------------------------
// There is currently a bug in the tandem include file sys/time.h that
// prevents us from getting the definition of gettimeofday from there.
// -----------------------------------------------------------------------
//extern int gettimeofday(struct timeval *, struct timezone *);
ExFunctionAscii::ExFunctionAscii(){};
ExFunctionChar::ExFunctionChar(){};
ExFunctionConvertHex::ExFunctionConvertHex(){};
ExFunctionRepeat::ExFunctionRepeat(){};
ExFunctionReplace::ExFunctionReplace()
{
collation_ = CharInfo::DefaultCollation;
setArgEncodedLen( 0, 0);//initialize the first child encoded length to 0
setArgEncodedLen( 0, 1);//initialize the second child encoded length to 0
};
ex_function_char_length::ex_function_char_length(){};
ex_function_char_length_doublebyte::ex_function_char_length_doublebyte(){};
ex_function_oct_length::ex_function_oct_length(){};
ex_function_position::ex_function_position(){};
ex_function_position_doublebyte::ex_function_position_doublebyte(){};
ex_function_concat::ex_function_concat(){};
ex_function_lower::ex_function_lower(){};
ex_function_upper::ex_function_upper(){};
ex_function_substring::ex_function_substring(){};
ex_function_trim_char::ex_function_trim_char(){};
ExFunctionTokenStr::ExFunctionTokenStr(){};
ex_function_current::ex_function_current(){};
ex_function_unique_execute_id::ex_function_unique_execute_id(){};//Trigger -
ex_function_get_triggers_status::ex_function_get_triggers_status(){};//Trigger -
ex_function_get_bit_value_at::ex_function_get_bit_value_at(){};//Trigger -
ex_function_is_bitwise_and_true::ex_function_is_bitwise_and_true(){};//MV
ex_function_explode_varchar::ex_function_explode_varchar(){};
ex_function_hash::ex_function_hash(){};
ex_function_hivehash::ex_function_hivehash(){};
ExHashComb::ExHashComb(){};
ExHiveHashComb::ExHiveHashComb(){};
ExHDPHash::ExHDPHash(){};
ExHDPHashComb::ExHDPHashComb(){};
ex_function_replace_null::ex_function_replace_null(){};
ex_function_mod::ex_function_mod(){};
ex_function_mask::ex_function_mask(){};
ExFunctionShift::ExFunctionShift(){};
ex_function_converttimestamp::ex_function_converttimestamp(){};
ex_function_dateformat::ex_function_dateformat(){};
ex_function_dayofweek::ex_function_dayofweek(){};
ex_function_extract::ex_function_extract(){};
ex_function_juliantimestamp::ex_function_juliantimestamp(){};
ex_function_exec_count::ex_function_exec_count(){};
ex_function_curr_transid::ex_function_curr_transid(){};
ex_function_ansi_user::ex_function_ansi_user(){};
ex_function_user::ex_function_user(){};
ex_function_nullifzero::ex_function_nullifzero(){};
ex_function_nvl::ex_function_nvl(){};
ex_function_queryid_extract::ex_function_queryid_extract(){};
ExFunctionUniqueId::ExFunctionUniqueId(){};
ExFunctionRowNum::ExFunctionRowNum(){};
ExFunctionHbaseColumnLookup::ExFunctionHbaseColumnLookup() {};
ExFunctionHbaseColumnsDisplay::ExFunctionHbaseColumnsDisplay() {};
ExFunctionHbaseColumnCreate::ExFunctionHbaseColumnCreate() {};
ExFunctionCastType::ExFunctionCastType() {};
ExFunctionSequenceValue::ExFunctionSequenceValue() {};
ExFunctionHbaseTimestamp::ExFunctionHbaseTimestamp() {};
ExFunctionHbaseVersion::ExFunctionHbaseVersion() {};
ExFunctionSVariance::ExFunctionSVariance(){};
ExFunctionSStddev::ExFunctionSStddev(){};
ExpRaiseErrorFunction::ExpRaiseErrorFunction(){};
ExFunctionRandomNum::ExFunctionRandomNum(){};
ExFunctionGenericUpdateOutput::ExFunctionGenericUpdateOutput(){}; // MV,
ExFunctionInternalTimestamp::ExFunctionInternalTimestamp(){}; // Triggers
ExFunctionRandomSelection::ExFunctionRandomSelection(){};
ExHash2Distrib::ExHash2Distrib(){};
ExProgDistrib::ExProgDistrib(){};
ExProgDistribKey::ExProgDistribKey(){};
ExPAGroup::ExPAGroup(){};
ExFunctionPack::ExFunctionPack(){};
ExUnPackCol::ExUnPackCol(){};
ExFunctionRangeLookup::ExFunctionRangeLookup(){};
ExAuditImage::ExAuditImage(){};
ExFunctionIsIP::ExFunctionIsIP(){};
ExFunctionInetAton::ExFunctionInetAton(){};
ExFunctionInetNtoa::ExFunctionInetNtoa(){};
ExFunctionAscii::ExFunctionAscii(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionChar::ExFunctionChar(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionIsIP::ExFunctionIsIP(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionInetAton::ExFunctionInetAton(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionInetNtoa::ExFunctionInetNtoa(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionConvertHex::ExFunctionConvertHex(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionRepeat::ExFunctionRepeat(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExFunctionReplace::ExFunctionReplace(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 4, attr, space)
{
collation_ = CharInfo::DefaultCollation;
//set first and second child encoded length
setArgEncodedLen( 0, 0);//initialize the first child encoded length to 0
setArgEncodedLen( 0, 1);//initialize the second child encoded length to 0
};
ex_function_char_length::ex_function_char_length(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_char_length_doublebyte::ex_function_char_length_doublebyte(
OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_oct_length::ex_function_oct_length(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_position::ex_function_position(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_position_doublebyte::ex_function_position_doublebyte
(
OperatorTypeEnum oper_type,
Attributes ** attr, Space * space
)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_concat::ex_function_concat(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_lower::ex_function_lower(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_upper::ex_function_upper(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_substring::ex_function_substring(OperatorTypeEnum oper_type,
short num_operands,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, num_operands, attr, space)
{
};
ex_function_translate::ex_function_translate(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
Int32 conv_type)
: ex_function_clause(oper_type, 2 , attr, space)
{
conv_type_= conv_type;
};
ex_function_trim::ex_function_trim(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
Int32 mode)
: ex_function_clause(oper_type, 3 , attr, space)
{
mode_ = mode;
};
ex_function_trim_char::ex_function_trim_char(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
Int32 mode)
: ex_function_trim(oper_type, attr, space, mode)
{
};
ExFunctionTokenStr::ExFunctionTokenStr(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_current::ex_function_current(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
//++ Triggers -
ex_function_unique_execute_id::ex_function_unique_execute_id(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
//++ Triggers -
ex_function_get_triggers_status::ex_function_get_triggers_status(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
//++ Triggers -
ex_function_get_bit_value_at::ex_function_get_bit_value_at(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
//++ MV
ex_function_is_bitwise_and_true::ex_function_is_bitwise_and_true(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_explode_varchar::ex_function_explode_varchar(OperatorTypeEnum oper_type,
short num_operands,
Attributes ** attr,
Space * space,
NABoolean forInsert)
: ex_function_clause(oper_type, num_operands, attr, space),
forInsert_(forInsert)
{
};
ex_function_hash::ex_function_hash(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_hivehash::ex_function_hivehash(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExHashComb::ExHashComb(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExHiveHashComb::ExHiveHashComb(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExHDPHash::ExHDPHash(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExHDPHashComb::ExHDPHashComb(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_replace_null::ex_function_replace_null(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space)
: ex_function_clause(oper_type, 4, attr, space)
{
};
ex_function_mod::ex_function_mod(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_mask::ex_function_mask(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExFunctionShift::ExFunctionShift(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_bool::ex_function_bool(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ex_function_converttimestamp::ex_function_converttimestamp
( OperatorTypeEnum oper_type
, Attributes ** attr
, Space * space
)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_dateformat::ex_function_dateformat(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
Int32 dateformat)
: ex_function_clause(oper_type, 2 , attr, space), dateformat_(dateformat)
{
};
ex_function_dayofweek::ex_function_dayofweek(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_extract::ex_function_extract(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
rec_datetime_field extractField)
: ex_function_clause(oper_type, 2 , attr, space), extractField_(extractField)
{
};
ex_function_juliantimestamp::ex_function_juliantimestamp
( OperatorTypeEnum oper_type
, Attributes ** attr
, Space * space
)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_exec_count::ex_function_exec_count
( OperatorTypeEnum oper_type
, Attributes ** attr
, Space * space
)
: ex_function_clause(oper_type, 1, attr, space)
{
execCount_ = 0;
};
ex_function_curr_transid::ex_function_curr_transid
( OperatorTypeEnum oper_type
, Attributes ** attr
, Space * space
)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ex_function_ansi_user::ex_function_ansi_user(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ex_function_user::ex_function_user(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_nullifzero::ex_function_nullifzero(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_nvl::ex_function_nvl(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_queryid_extract::ex_function_queryid_extract(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExFunctionUniqueId::ExFunctionUniqueId(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ExFunctionRowNum::ExFunctionRowNum(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ExFunctionHbaseColumnLookup::ExFunctionHbaseColumnLookup(OperatorTypeEnum oper_type,
Attributes ** attr,
const char * colName,
Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
strcpy(colName_, colName);
};
ExFunctionHbaseColumnsDisplay::ExFunctionHbaseColumnsDisplay(OperatorTypeEnum oper_type,
Attributes ** attr,
Lng32 numCols,
char * colNames,
Space * space)
: ex_function_clause(oper_type, 2, attr, space),
numCols_(numCols),
colNames_(colNames)
{
};
ExFunctionHbaseColumnCreate::ExFunctionHbaseColumnCreate(OperatorTypeEnum oper_type,
Attributes ** attr,
short numEntries,
short colNameMaxLen,
Int32 colValMaxLen,
short colValVCIndLen,
Space * space)
: ex_function_clause(oper_type, 1, attr, space),
numEntries_(numEntries),
colNameMaxLen_(colNameMaxLen),
colValMaxLen_(colValMaxLen),
colValVCIndLen_(colValVCIndLen)
{
};
ExFunctionSequenceValue::ExFunctionSequenceValue(OperatorTypeEnum oper_type,
Attributes ** attr,
const SequenceGeneratorAttributes &sga,
Space * space)
: ex_function_clause(oper_type, 1, attr, space),
sga_(sga),
flags_(0)
{
};
ExFunctionHbaseTimestamp::ExFunctionHbaseTimestamp(
OperatorTypeEnum oper_type,
Attributes ** attr,
Lng32 colIndex,
Space * space)
: ex_function_clause(oper_type, 2, attr, space),
colIndex_(colIndex),
flags_(0)
{
};
ExFunctionHbaseVersion::ExFunctionHbaseVersion(
OperatorTypeEnum oper_type,
Attributes ** attr,
Lng32 colIndex,
Space * space)
: ex_function_clause(oper_type, 2, attr, space),
colIndex_(colIndex),
flags_(0)
{
};
ExFunctionCastType::ExFunctionCastType(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionSVariance::ExFunctionSVariance(Attributes **attr, Space *space)
: ex_function_clause(ITM_VARIANCE, 4, attr, space)
{
};
ExFunctionSStddev::ExFunctionSStddev(Attributes **attr, Space *space)
: ex_function_clause(ITM_STDDEV, 4, attr, space)
{
};
ExpRaiseErrorFunction::ExpRaiseErrorFunction (Attributes **attr,
Space *space,
Lng32 sqlCode,
NABoolean raiseError,
const char *constraintName,
const char *tableName,
const NABoolean hasStringExp) // -- Triggers
: ex_function_clause (ITM_RAISE_ERROR, (hasStringExp ? 2 : 1), attr, space),
theSQLCODE_(sqlCode),
constraintName_((char *)constraintName),
tableName_((char *)tableName)
{
setRaiseError(raiseError);
};
ExFunctionRandomNum::ExFunctionRandomNum(OperatorTypeEnum opType,
short num_operands,
NABoolean simpleRandom,
Attributes **attr,
Space *space)
: ex_function_clause(opType, num_operands, attr, space),
flags_(0)
{
seed_ = 0;
if (simpleRandom)
flags_ |= SIMPLE_RANDOM;
}
// MV,
ExFunctionGenericUpdateOutput::ExFunctionGenericUpdateOutput(OperatorTypeEnum oper_type,
Attributes **attr,
Space *space)
: ex_function_clause(oper_type, 1, attr, space)
{}
// Triggers
ExFunctionInternalTimestamp::ExFunctionInternalTimestamp(OperatorTypeEnum oper_type,
Attributes **attr,
Space *space)
: ex_function_clause(oper_type, 1, attr, space)
{}
// Triggers
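// Evaluates GET_BIT_VALUE_AT: returns 1 if the bit at the given position
// (counted from the most significant bit of the first byte) is set in the
// operand buffer, else 0; positions outside the buffer raise EXE_GETBIT_ERROR.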
ex_expr::exp_return_type ex_function_get_bit_value_at::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
#pragma nowarn(1506) // warning elimination
Lng32 buffLen = getOperand(1)->getLength(op_data[1]);
#pragma warn(1506) // warning elimination
// Get the position from operand 2.
Lng32 pos = *(Lng32 *)op_data[2];
// The character we look into
Lng32 charnum = pos / 8;
// The bit in the character we look into
Lng32 bitnum = 8-(pos % 8)-1;
// Check for error conditions.
if ((charnum >= buffLen) || (charnum < 0))
{
ExRaiseSqlError(heap, diagsArea, EXE_GETBIT_ERROR);
return ex_expr::EXPR_ERROR;
}
unsigned char onechar = *(unsigned char *)(op_data[1] + charnum);
unsigned char mask = 1;
#pragma nowarn(1506) // warning elimination
mask = mask<<bitnum;
#pragma warn(1506) // warning elimination
*((ULng32*)op_data[0]) = (ULng32) (mask & onechar ? 1 : 0);
return ex_expr::EXPR_OK;
}
//++ MV
// The function returns True if any of the bits is set in both of the strings
ex_expr::exp_return_type ex_function_is_bitwise_and_true::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
#pragma nowarn(1506) // warning elimination
Lng32 leftSize = getOperand(1)->getLength(op_data[1]);
Lng32 rightSize = getOperand(2)->getLength(op_data[2]);
#pragma warn(1506) // warning elimination
if (leftSize != rightSize)
{
ExRaiseSqlError(heap, diagsArea, EXE_IS_BITWISE_AND_ERROR);
return ex_expr::EXPR_ERROR;
}
// Iterate through all characters until one "bitwise and" returns TRUE
// Starting with False
*(Lng32 *)op_data[0] = 0;
unsigned char *leftCharPtr = (unsigned char *)(op_data[1]);
unsigned char *rightCharPtr = (unsigned char *)(op_data[2]);
unsigned char *endBarrier = rightCharPtr + rightSize;
for (; rightCharPtr < endBarrier; rightCharPtr++, leftCharPtr++)
{
if ((*leftCharPtr) & (*rightCharPtr))
{
*(Lng32 *)op_data[0] = 1;
break;
}
}
return ex_expr::EXPR_OK;
}
ExFunctionRandomSelection::ExFunctionRandomSelection(OperatorTypeEnum opType,
Attributes **attr,
Space *space,
float selProb)
: ExFunctionRandomNum(opType, 1, FALSE, attr, space)
{
if (selProb < 0)
selProb = 0.0;
selProbability_ = selProb;
difference_ = -1;
}
ExHash2Distrib::ExHash2Distrib(Attributes **attr, Space *space)
: ex_function_clause(ITM_HASH2_DISTRIB, 3, attr, space)
{}
ExProgDistrib::ExProgDistrib(Attributes **attr, Space *space)
: ex_function_clause(ITM_PROGDISTRIB, 3, attr, space)
{}
ExProgDistribKey::ExProgDistribKey(Attributes **attr, Space *space)
: ex_function_clause(ITM_PROGDISTRIBKEY, 4, attr, space)
{}
ExPAGroup::ExPAGroup(Attributes **attr, Space *space)
: ex_function_clause(ITM_PAGROUP, 4, attr, space)
{}
ExUnPackCol::ExUnPackCol(Attributes **attr,
Space *space,
Lng32 width,
Lng32 base,
NABoolean nullsPresent)
: ex_function_clause(ITM_UNPACKCOL, 3, attr, space),
width_(width),
base_(base)
{
setNullsPresent(nullsPresent);
};
ExFunctionRangeLookup::ExFunctionRangeLookup(Attributes** attr,
Space* space,
Lng32 numParts,
Lng32 partKeyLen)
: ex_function_clause(ITM_RANGE_LOOKUP, 3, attr, space),
numParts_(numParts),
partKeyLen_(partKeyLen)
{
}
ExAuditImage::ExAuditImage(Attributes** attr,
Space* space,
ExpDP2ExprPtr auditImageContainerExpr
)
: ex_function_clause(ITM_AUDIT_IMAGE, 2, attr, space),
auditImageContainerExpr_(auditImageContainerExpr)
{
}
ex_expr::exp_return_type ex_function_concat::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
#pragma nowarn(1506) // warning elimination
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
#pragma warn(1506) // warning elimination
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
Lng32 max_len = getOperand(0)->getLength();
if ((len1 + len2) > max_len) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
Int32 actual_length = len1+len2;
// If operand 0 is varchar, store the sum of operand 1 length and
// operand 2 length in the varlen area.
getOperand(0)->setVarLength((actual_length), op_data[-MAX_OPERANDS]);
// Now, copy the contents of operand 1 followed by the contents of
// operand 2 into operand 0.
str_cpy_all(op_data[0], op_data[1], len1);
str_cpy_all(&op_data[0][len1], op_data[2], len2);
//
// Blankpad the target (if needed).
//
if ((actual_length) < max_len)
str_pad(&op_data[0][actual_length], max_len - actual_length, ' ');
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ExFunctionRepeat::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Lng32 repeatCount = *(Lng32 *)op_data[2];
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
Lng32 resultMaxLen = getOperand(0)->getLength();
if ((repeatCount < 0) || ((repeatCount * len1) > resultMaxLen))
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
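// The bounds check above ensures repeatCount copies of the source fit into
// the result, so the source bytes can simply be appended repeatCount times.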
Lng32 currPos = 0;
for (Int32 i = 0; i < repeatCount; i++)
{
str_cpy_all(&op_data[0][currPos], op_data[1], len1);
currPos += len1;
}
// If operand 0 is varchar, store the length.
getOperand(0)->setVarLength(currPos, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ExFunctionReplace::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(0))->getCharSet();
// Note: all lengths are byte lengths.
// source string
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
char * str1 = op_data[1];
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( str1, prec1, len1, cs );
}
// if a case-insensitive search is to be done, make a copy of the source
// string and upshift it. This copy is only used to do the search;
// the original string supplies the bytes that are copied to the result.
char * searchStr1 = str1;
if ((caseInsensitiveOperation()) && (heap) && (str1))
{
searchStr1 = new(heap) char[len1];
str_cpy_convert(searchStr1, str1, len1, 1);
}
// string to search for in string1
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
char * str2 = op_data[2];
// string to replace string2 with in string1
Lng32 len3 = getOperand(3)->getLength(op_data[-MAX_OPERANDS+3]);
char * str3 = op_data[3];
if ( cs == CharInfo::UTF8 )
{
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( str2, prec2, len2, cs );
Int32 prec3 = ((SimpleType *)getOperand(3))->getPrecision();
len3 = Attributes::trimFillerSpaces( str3, prec3, len3, cs );
}
Lng32 resultMaxLen = getOperand(0)->getLength();
char * result = op_data[0];
char * sourceStr = searchStr1;
char * searchStr = str2;
Int32 lenSourceStr = len1; //getArgEncodedLen(0);
Int32 lenSearchStr = len2; //getArgEncodedLen(1);
Int32 effLenSourceStr = len1; //getArgEncodedLen(0);
Int32 effLenSearchStr = len2; //getArgEncodedLen(1);
Int16 nPasses = 1;
if (CollationInfo::isSystemCollation(getCollation()))
{
// LCOV_EXCL_START
nPasses= CollationInfo::getCollationNPasses(getCollation());
lenSourceStr = getArgEncodedLen(0);
lenSearchStr = getArgEncodedLen(1);
assert (heap);
sourceStr = new(heap) char [lenSourceStr];
ex_function_encode::encodeCollationSearchKey(
(UInt8 *) str1,
len1,
(UInt8 *) sourceStr,
lenSourceStr,
(Int32 &) effLenSourceStr,
nPasses,
getCollation(),
TRUE);
searchStr = new(heap) char [lenSearchStr];
ex_function_encode::encodeCollationSearchKey(
(UInt8 *) str2,
len2,
(UInt8 *) searchStr,
lenSearchStr,
(Int32 &)effLenSearchStr,
nPasses,
getCollation(),
TRUE);
// LCOV_EXCL_STOP
}
short bpc = (getOperand(1)->widechar() ? 2 : 1);
NABoolean done = FALSE;
Lng32 position;
Lng32 currPosStr1 = 0;
Lng32 currLenStr1 = len1;
Lng32 currPosResult = 0;
Lng32 currLenResult = 0;
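// Main loop: find the next occurrence of the search string, copy the source
// bytes that precede it unchanged, then append the replacement and resume
// searching just past the match. When no match remains, the tail of the
// source is copied and the loop terminates.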
while (! done)
{
position =
ex_function_position::findPosition(&sourceStr[currPosStr1 * nPasses],
currLenStr1 * nPasses,
searchStr,
effLenSearchStr,
bpc,
nPasses,
getCollation(),
0,
cs);
if(position < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("REPLACE FUNCTION");
return ex_expr::EXPR_ERROR;
}
if (position > 0)
{
position = position - 1;
// copy part of str1 from currPosStr1 till position into result
if ((currLenResult + position) > resultMaxLen) {
if (sourceStr && sourceStr != str1)
NADELETEBASIC(sourceStr,(heap));
if (searchStr && searchStr != str2)
NADELETEBASIC(searchStr,(heap));
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (position > 0)
{
str_cpy_all(&result[currPosResult], &str1[currPosStr1],
position);
}
currPosResult += position;
currLenResult += position;
currPosStr1 += (position + len2);
currLenStr1 -= (position + len2);
// now copy str3 to result. This is the replacement.
if ((currLenResult + len3) > resultMaxLen) {
if (sourceStr && sourceStr != str1)
NADELETEBASIC(sourceStr,(heap));
if (searchStr && searchStr != str2)
NADELETEBASIC(searchStr,(heap));
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
str_cpy_all(&result[currPosResult], str3, len3);
currLenResult += len3;
currPosResult += len3;
}
else
{
done = TRUE;
if ((currLenResult + currLenStr1) > resultMaxLen) {
if (sourceStr && sourceStr != str1)
NADELETEBASIC(sourceStr,(heap));
if (searchStr && searchStr != str2)
NADELETEBASIC(searchStr,(heap));
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (currLenStr1 > 0)
str_cpy_all(&result[currPosResult], &str1[currPosStr1], currLenStr1);
currLenResult += currLenStr1;
}
}
// If operand 0 is varchar, store the length.
getOperand(0)->setVarLength(currLenResult, op_data[-MAX_OPERANDS]);
if (sourceStr && sourceStr != str1)
NADELETEBASIC(sourceStr,(heap));
if (searchStr && searchStr != str2)
NADELETEBASIC(searchStr,(heap));
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ex_function_substring::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int32 len1_bytes = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
// Get the starting position in characters from operand 2.
// This may be a negative value!
Int32 specifiedCharStartPos = *(Lng32 *)op_data[2];
// Starting position in bytes. It can NOT be a negative value.
Int32 startByteOffset = 0; // Assume beginning of buffer for now.
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
// Convert number of character to offset in buffer.
if(specifiedCharStartPos > 1)
{
startByteOffset = Attributes::convertCharToOffset(op_data[1], specifiedCharStartPos, len1_bytes, cs);
if(startByteOffset < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("SUBSTRING FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
else { /* Leave startByteOffset at 0 */ }
// If operand 3 exists, get the length of substring in characters from operand
// 3. Otherwise, if specifiedCharStartPos > 0, length is from specifiedCharStartPos char to end of buf.
// If specifiedCharStartPos is 0, length is all of buf except last character.
// If specifiedCharStartPos is negative, length is even less (by that negative amount).
Int32 inputLen_bytes = len1_bytes ; // Assume byte count = length of string for now
Int32 specifiedLenInChars = inputLen_bytes ; // Assume char count = byte count for now
Int32 prec1 = 0;
if (getNumOperands() == 4)
specifiedLenInChars = *(Lng32 *)op_data[3]; // Use specified desired length for now
if ( cs == CharInfo::UTF8 )
{
prec1 = ((SimpleType *)getOperand(1))->getPrecision();
if ( prec1 )
inputLen_bytes = Attributes::trimFillerSpaces( op_data[1], prec1, inputLen_bytes, cs );
}
// NOTE: Following formula for lastChar works even if specifiedCharStartPos is 0 or negative.
Int32 lastChar = specifiedLenInChars + (specifiedCharStartPos - 1);
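// For example (illustrative), SUBSTRING('abcdef' FROM -2 FOR 6) gives
// specifiedCharStartPos = -2, specifiedLenInChars = 6, so
// lastChar = 6 + (-2 - 1) = 3 and the result is 'abc'.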
// The end of the substr as a byte offset
Int32 endOff_bytes = inputLen_bytes; // Assume length of input for now.
Int32 actualLenInBytes = 0;
if ( startByteOffset >= inputLen_bytes )
{
// Nothing left in buf to copy, so endOff_bytes and actualLenInBytes are OK as is.
startByteOffset = inputLen_bytes; // IGNORE it if specified start > end of buffer!
}
else if (lastChar > 0)
{
endOff_bytes = Attributes::convertCharToOffset (op_data[1], lastChar+1, inputLen_bytes, cs);
if(endOff_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("SUBSTRING FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
else endOff_bytes = 0;
// Check for error conditions. endOff_bytes will be less than startByteOffset if length is
// less than 0.
if (endOff_bytes < startByteOffset)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_SUBSTRING_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
actualLenInBytes = endOff_bytes - startByteOffset;
// Now, copy the substring of operand 1 from the starting position into
// operand 0, if actualLenInBytes is greater than 0.
if ( actualLenInBytes > 0)
str_cpy_all(op_data[0], &op_data[1][startByteOffset], actualLenInBytes);
//
// Blankpad the target (if needed).
//
Int32 len0_bytes = getOperand(0)->getLength();
if ( (actualLenInBytes < len0_bytes) && prec1 )
str_pad(&op_data[0][actualLenInBytes], len0_bytes - actualLenInBytes, ' ');
// store the length of substring in the varlen indicator.
if (getOperand(0)->getVCIndicatorLength() > 0)
getOperand(0)->setVarLength( actualLenInBytes, op_data[-MAX_OPERANDS] );
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_trim_char::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
const Int32 lenSrcStrSmallBuf = 128;
char srcStrSmallBuf[lenSrcStrSmallBuf];
const Int32 lenTrimCharSmallBuf = 8;
char trimCharSmallBuf[lenTrimCharSmallBuf];
// find out the length of trim character.
#pragma nowarn(1506) // warning elimination
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
#pragma warn(1506) // warning elimination
CharInfo::CharSet cs = ((SimpleType *)getOperand(0))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
Int32 number_bytes = 0;
number_bytes = Attributes::getFirstCharLength(op_data[1], len1, cs);
if(number_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("TRIM FUNCTION");
return ex_expr::EXPR_ERROR;
}
// len1 (length of trim character) must be 1 character. Raise an exception if greater
// than 1.
if (len1 != number_bytes)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_TRIM_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
#pragma nowarn(1506) // warning elimination
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
#pragma warn(1506) // warning elimination
if (cs == CharInfo::UTF8) // If so, must ignore any filler spaces at end of string
{
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
Int16 nPasses = 1;
char * trimChar = op_data[1];
char * srcStr = op_data[2];
Lng32 lenSrcStr = len2;
Lng32 lenTrimChar = len1;
Lng32 effLenSourceStr = len2;
Lng32 effLenTrimChar = len1;
// case of collation --
if (CollationInfo::isSystemCollation(getCollation()))
{
// LCOV_EXCL_START
nPasses = CollationInfo::getCollationNPasses(getCollation());
//get the length of the encoded source string
lenSrcStr = getSrcStrEncodedLength();
//get length of encoded trim character
lenTrimChar = getTrimCharEncodedLength();
assert (heap);
if (lenSrcStr <= lenSrcStrSmallBuf)
{
srcStr = srcStrSmallBuf;
}
else
{
srcStr = new(heap) char [lenSrcStr];
}
//get encoded key
ex_function_encode::encodeCollationSearchKey(
(UInt8 *) op_data[2],
len2,
(UInt8 *) srcStr,
lenSrcStr,
(Int32 &) effLenSourceStr,
nPasses,
getCollation(),
FALSE);
if (lenTrimChar <= lenTrimCharSmallBuf)
{
trimChar = trimCharSmallBuf;
}
else
{
trimChar = new(heap) char [lenTrimChar];
}
//get encoded key
ex_function_encode::encodeCollationSearchKey(
(UInt8 *) op_data[1],
len1,
(UInt8 *) trimChar,
lenTrimChar,
(Int32 &) effLenTrimChar,
nPasses,
getCollation(),
FALSE);
// LCOV_EXCL_STOP
}
// Find how many leading characters in operand 2 correspond to the trim
// character.
Lng32 len0 = len2;
Lng32 start = 0;
NABoolean notEqualFlag = 0;
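// From the tests below, the trim mode appears to encode 0 = TRAILING,
// 1 = LEADING, 2 = BOTH (the leading-trim loop runs for modes 1 and 2,
// the trailing-trim loop for modes 0 and 2);
// e.g. TRIM(BOTH 'x' FROM 'xxabcx') -> 'abc'.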
if ((getTrimMode() == 1) || (getTrimMode() == 2))
{
while (start <= len2 - len1)
{
for(Int32 i= 0; i < lenTrimChar; i++)
{
if(trimChar[i] != srcStr[start * nPasses +i])
{
notEqualFlag = 1;
break;
}
}
if (notEqualFlag == 0)
{
start += len1;
len0 -= len1;
}
else
break;
}
}
// Find how many trailing characters in operand 2 correspond to the trim
// character.
Int32 end = len2;
Int32 endt;
Int32 numberOfCharacterInBuf;
Int32 bufferLength = end - start;
const Int32 smallBufSize = 128;
char smallBuf[smallBufSize];
notEqualFlag = 0;
if ((getTrimMode() == 0) || (getTrimMode() == 2))
{
char *charLengthInBuf;
if(bufferLength <= smallBufSize)
charLengthInBuf = smallBuf;
else
charLengthInBuf = new(heap) char[bufferLength];
numberOfCharacterInBuf =
Attributes::getCharLengthInBuf(op_data[2] + start,
op_data[2] + end, charLengthInBuf, cs);
if(numberOfCharacterInBuf < 0)
{
if (srcStr && srcStr != op_data[2])
NADELETEBASIC(srcStr,(heap));
if (trimChar && trimChar != op_data[1])
NADELETEBASIC(trimChar,(heap));
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("TRIM FUNCTION");
return ex_expr::EXPR_ERROR;
}
while (end >= start + len1)
{
if (charLengthInBuf[--numberOfCharacterInBuf] != len1) break;
endt = end - len1;
for(Int32 i = 0; i < lenTrimChar; i++)
{
if (trimChar[i] != srcStr[endt *nPasses + i])
{
notEqualFlag = 1;
break;
}
}
if(notEqualFlag == 0)
{
end = endt;
len0 -= len1;
}
else
break;
}
if(bufferLength > smallBufSize)
NADELETEBASIC(charLengthInBuf, heap);
}
// Result is always a varchar.
// store the length of trimmed string in the varlen indicator.
getOperand(0)->setVarLength(len0, op_data[-MAX_OPERANDS]);
// Now, copy operand 2 skipping the trim characters into
// operand 0.
if (len0 > 0)
str_cpy_all(op_data[0], &op_data[2][start], len0);
if (srcStr && srcStr != srcStrSmallBuf && srcStr != op_data[2] )
NADELETEBASIC(srcStr,(heap));
if (trimChar && trimChar != trimCharSmallBuf && trimChar != op_data[1])
NADELETEBASIC(trimChar,(heap));
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ex_function_lower::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
Int32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
CharInfo::CharSet cs = ((SimpleType *)getOperand(0))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
getOperand(0)->setVarLength(len1, op_data[-MAX_OPERANDS]);
cnv_charset charset = convertCharsetEnum(cs);
Int32 number_bytes;
Int32 total_bytes_out = 0;
char tmpBuf[4];
UInt32 UCS4value;
UInt16 UCS2value;
// Now, copy the contents of operand 1 after the case change into operand 0.
Int32 len0 = 0;
if(cs == CharInfo::ISO88591)
{
while (len0 < len1)
{
op_data[0][len0] = TOLOWER(op_data[1][len0]);
++len0;
}
}
else
{
// If the character set is UTF8, SJIS, or some other multi-byte charset,
// convert the string to UCS2, call the UCS2 lower function and convert
// the string back.
while (len0 < len1)
{
number_bytes =
LocaleCharToUCS4(op_data[1] + len0, len1 - len0, &UCS4value, charset);
if(number_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("LOWER FUNCTION");
return ex_expr::EXPR_ERROR;
}
if(number_bytes == 1 && (op_data[1][len0] & 0x80) == 0)
{
op_data[0][len0] = TOLOWER(op_data[1][len0]);
++len0;
++total_bytes_out;
}
else
{
UCS2value = UCS4value & 0XFFFF;
UCS4value = unicode_char_set::to_lower(*(NAWchar *)&UCS2value);
Int32 number_bytes_out =
UCS4ToLocaleChar((const UInt32 *)&UCS4value, tmpBuf,
CharInfo::maxBytesPerChar(cs), charset);
if(number_bytes_out < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("LOWER FUNCTION");
return ex_expr::EXPR_ERROR;
}
for (Int32 j = 0; j < number_bytes_out; j++)
{
op_data[0][total_bytes_out] = tmpBuf[j];
total_bytes_out++;
}
len0 += number_bytes;
}
}
}
if (getOperand(0)->getVCIndicatorLength() > 0)
getOperand(0)->setVarLength(total_bytes_out, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ex_function_upper::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
Int32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
Int32 len0 = getOperand(0)->getLength();
Int32 in_pos = 0;
Int32 out_pos = 0;
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
Int32 number_bytes;
UInt32 UCS4value = 0;
UInt16 UCS2value = 0;
// Now, copy the contents of operand 1 after the case change into operand 0.
if(cs == CharInfo::ISO88591)
{
while(in_pos < len1)
{
op_data[0][out_pos] = TOUPPER(op_data[1][in_pos]);
++in_pos;
++out_pos;
}
}
else
{
cnv_charset charset = convertCharsetEnum(cs);
// If the character set is UTF8, SJIS, or some other multi-byte charset,
// convert the string to UCS2, call the UCS2 upper function and convert
// the string back.
while(in_pos < len1)
{
number_bytes =
LocaleCharToUCS4(op_data[1] + in_pos, len1 - in_pos, &UCS4value, charset);
if(number_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("UPPER FUNCTION");
return ex_expr::EXPR_ERROR;
}
if(number_bytes == 1 && (op_data[1][in_pos] & 0x80) == 0)
{
op_data[0][out_pos] = TOUPPER(op_data[1][in_pos]);
++in_pos;
++out_pos;
}
else
{
in_pos += number_bytes;
UCS2value = UCS4value & 0XFFFF;
NAWchar wcUpshift[3];
Int32 charCnt = 1; // Default count to 1
// search against unicode_lower2upper_mapping_table_full
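// (For example, U+00DF LATIN SMALL LETTER SHARP S upshifts to the
// two-character sequence "SS", which is why a full mapping may yield
// two or three NAWchars instead of one.)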
NAWchar* tmpWCP = unicode_char_set::to_upper_full(UCS2value);
if ( tmpWCP )
{
wcUpshift[0] = *tmpWCP++;
wcUpshift[1] = *tmpWCP++;
wcUpshift[2] = *tmpWCP ;
charCnt = (*tmpWCP) ? 3 : 2;
}
else
wcUpshift[0] = unicode_char_set::to_upper(UCS2value);
for (Int32 ii = 0 ; ii < charCnt ; ii++)
{
UInt32 UCS4_val = wcUpshift[ii];
char tmpBuf[8];
Int32 out_bytes = UCS4ToLocaleChar((const UInt32 *)&UCS4_val, tmpBuf,
CharInfo::maxBytesPerChar(cs), charset);
if(out_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("UPPER FUNCTION");
return ex_expr::EXPR_ERROR;
}
if (out_pos + out_bytes > len0)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
for (Int32 j = 0; j < out_bytes; j++)
{
op_data[0][out_pos] = tmpBuf[j];
++out_pos;
}
}
}
}
}
getOperand(0)->setVarLength(out_pos, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_oct_length::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
// Move operand's length into result.
// The data type of result is long.
Int32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
#pragma nowarn(1506) // warning elimination
*(Lng32 *)op_data[0] = len1;
#pragma warn(1506) // warning elimination
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ExFunctionAscii::eval(char *op_data[],CollHeap* heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
Int32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
if (len1 > 0)
{
switch (getOperType() )
{
case ITM_UNICODE_CODE_VALUE:
{
UInt16 temp;
str_cpy_all((char *)&temp, op_data[1], 2);
*(Lng32 *)op_data[0] = temp;
}
break;
case ITM_NCHAR_MP_CODE_VALUE:
{
UInt16 temp;
#if defined( NA_LITTLE_ENDIAN )
// swap the byte order on little-endian machines as NCHAR_MP charsets are stored
// in multi-byte form (i.e. in big-endian order).
temp = reversebytesUS( *((NAWchar*) op_data[1]) );
#else
str_cpy_all((char *)&temp, op_data[1], 2);
#endif
*(UInt32 *)op_data[0] = temp;
}
break;
case ITM_ASCII:
{
Int32 val = (unsigned char)(op_data[1][0]);
if ( (val > 0x7F) && (cs != CharInfo::ISO88591) )
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("ASCII");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
*(UInt32 *)op_data[0] = (unsigned char)(op_data[1][0]);
break;
}
case ITM_CODE_VALUE:
default:
{
UInt32 UCS4value = 0;
if ( cs == CharInfo::ISO88591 )
UCS4value = *(unsigned char *)(op_data[1]);
else
{
// Use a signed type so the error check below is meaningful
// (with UInt32, "firstCharLen < 0" is always false).
Int32 firstCharLen =
LocaleCharToUCS4(op_data[1], len1, &UCS4value, convertCharsetEnum(cs));
if( firstCharLen < 0 )
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("CODE_VALUE FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
*(Int32 *)op_data[0] = UCS4value;
break;
}
}
}
else
*(Int32 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionChar::eval(char *op_data[],CollHeap* heap,
ComDiagsArea** diagsArea)
{
// Use a signed type so the range checks below can detect negative values.
Lng32 asciiCode = *(Lng32 *)op_data[1];
CharInfo::CharSet cs = ((SimpleType *)getOperand(0))->getCharSet();
if (getOperType() == ITM_CHAR)
{
if (cs == CharInfo::ISO88591)
{
if (asciiCode < 0 || asciiCode > 0xFF)
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("CHAR");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
else
{
op_data[0][0] = (char)asciiCode;
getOperand(0)->setVarLength(1, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
}
else // Must be UTF8 (at least until we support SJIS or some other multi-byte charset)
{
Int32 len0_bytes = getOperand(0)->getLength();
ULng32 * UCS4ptr = (ULng32 *)op_data[1];
Int32 charLength = UCS4ToLocaleChar( UCS4ptr, (char *)op_data[0], len0_bytes, cnv_UTF8 );
if ( charLength < 0 )
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("CHAR FUNCTION");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
else
{
if ( charLength < len0_bytes )
str_pad(((char *)op_data[0]) + charLength, len0_bytes - charLength, ' ');
getOperand(0)->setVarLength(charLength, op_data[-MAX_OPERANDS]);
}
}
}
else
{
// ITM_UNICODE_CHAR or ITM_NCHAR_MP_CHAR
// check if the code value is legal for UNICODE only. No need
// for KANJI/KSC5601 as both take code-point values with any bit-patterns.
if ( (getOperType() == ITM_UNICODE_CHAR) && (asciiCode < 0 || asciiCode >= 0xFFFE))
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("CHAR");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
NAWchar wcharCode = (NAWchar)asciiCode;
#if defined( NA_LITTLE_ENDIAN )
// swap the byte order on little-endian machines as NCHAR_MP charsets are stored
// in multi-byte form (i.e. in big-endian order).
if (getOperType() == ITM_NCHAR_MP_CHAR)
{
//SQ_LINUX #ifdef NA_WINNT
*(NAWchar*)op_data[0] = reversebytesUS(wcharCode);
} else
*(NAWchar*)op_data[0] = wcharCode;
#else
*(NAWchar*)op_data[0] = wcharCode;
#endif
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_char_length::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
Int32 offset = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
Int32 numOfChar = 0;
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if(CharInfo::maxBytesPerChar(cs) == 1)
{
*(Int32 *)op_data[0] = offset;
return ex_expr::EXPR_OK;
}
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
offset = Attributes::trimFillerSpaces( op_data[1], prec1, offset, cs );
}
// convert to number of character
numOfChar = Attributes::convertOffsetToChar (op_data[1], offset, cs);
if(numOfChar < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("CHAR FUNCTION");
return ex_expr::EXPR_ERROR;
}
// Move operand's length into result.
// The data type of result is long.
*(Int32 *)op_data[0] = numOfChar;
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ExFunctionConvertHex::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
static const char HexArray[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
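// Example (illustrative): CONVERTTOHEX('AB') yields '4142' (two hex
// digits per source byte, four per NAWchar); CONVERTFROMHEX('4142')
// yields 'AB' back.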
#pragma nowarn(1506) // warning elimination
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
#pragma warn(1506) // warning elimination
if (getOperType() == ITM_CONVERTTOHEX)
{
Int32 i;
if ( DFS2REC::isDoubleCharacter(getOperand(1)->getDatatype()) )
{
NAWchar *w_p = (NAWchar*)op_data[1];
Int32 w_len = len1 / sizeof(NAWchar);
for (i = 0; i < w_len; i++)
{
op_data[0][4 * i ] = HexArray[0x0F & w_p[i] >> 12];
op_data[0][4 * i + 1] = HexArray[0x0F & w_p[i] >> 8];
op_data[0][4 * i + 2] = HexArray[0x0F & w_p[i] >> 4];
op_data[0][4 * i + 3] = HexArray[0x0F & w_p[i]];
}
} else {
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
for (i = 0; i < len1; i++)
{
op_data[0][2 * i] = HexArray[0x0F & op_data[1][i] >> 4];
op_data[0][2 * i + 1] = HexArray[0x0F & op_data[1][i]];
}
}
getOperand(0)->setVarLength(2 * len1, op_data[-MAX_OPERANDS]);
}
else
{
// convert from hex.
// make sure that length is an even number.
if ((len1 % 2) != 0)
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("CONVERTFROMHEX");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
Int32 i = 0;
Int32 j = 0;
while (i < len1)
{
// Both hex digits must be valid. Note the added parentheses: without
// them, && binds tighter than ||, so the second digit was never
// validated when the first digit was '0'-'9'.
if ((((op_data[1][i] >= '0') && (op_data[1][i] <= '9')) ||
((op_data[1][i] >= 'A') && (op_data[1][i] <= 'F'))) &&
(((op_data[1][i+1] >= '0') && (op_data[1][i+1] <= '9')) ||
((op_data[1][i+1] >= 'A') && (op_data[1][i+1] <= 'F'))))
{
unsigned char upper4Bits;
unsigned char lower4Bits;
if ((op_data[1][i] >= '0') && (op_data[1][i] <= '9'))
#pragma nowarn(1506) // warning elimination
upper4Bits = (unsigned char)(op_data[1][i]) - '0';
#pragma warn(1506) // warning elimination
else
#pragma nowarn(1506) // warning elimination
upper4Bits = (unsigned char)(op_data[1][i]) - 'A' + 10;
#pragma warn(1506) // warning elimination
if ((op_data[1][i+1] >= '0') && (op_data[1][i+1] <= '9'))
#pragma nowarn(1506) // warning elimination
lower4Bits = (unsigned char)(op_data[1][i+1]) - '0';
#pragma warn(1506) // warning elimination
else
#pragma nowarn(1506) // warning elimination
lower4Bits = (unsigned char)(op_data[1][i+1]) - 'A' + 10;
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
op_data[0][j] = (upper4Bits << 4) | lower4Bits;
#pragma warn(1506) // warning elimination
i += 2;
j++;
}
else
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("CONVERTFROMHEX");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
} // while
getOperand(0)->setVarLength(len1 / 2, op_data[-MAX_OPERANDS]);
} // CONVERTFROMHEX
return ex_expr::EXPR_OK;
}
Int32 ex_function_position::findPosition (char* pat,
Int32 patLen,
char* src,
Int32 srcLen,
NABoolean patternInFront)
{
Int32 i, j, k;
// Pattern must be able to "fit" in source string
if (patLen > srcLen)
return 0;
// One-time check at the beginning of the src string if the flag indicates so.
if (patternInFront)
return ((str_cmp(pat, src, patLen) == 0) ? 1 : 0);
// Search for pattern throughout the src string
for (i=0; (i + patLen) <= srcLen; i++) {
NABoolean found = TRUE ;
for (j=i, k=0; found && (k < patLen); k++, j++) {
if (src[j] != pat[k])
found = 0;
}
if (found)
return i+1;
}
return 0;
}
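// Illustrative use of the simple overload above:
//   findPosition("bc", 2, "abcbc", 5, FALSE)  returns 2 (first match, 1-based);
//   findPosition("bc", 2, "abcbc", 5, TRUE)   returns 0 (no match at offset 0).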
Lng32 ex_function_position::findPosition
(char * sourceStr,
Lng32 sourceLen,
char * searchStr,
Lng32 searchLen,
short bytesPerChar,
Int16 nPasses,
CharInfo::Collation collation,
short charOffsetFlag , // 1: char, 0: offset
CharInfo::CharSet cs )
{
// If searchLen is <= 0 or searchLen > sourceLen or
// if searchStr is not present in sourceStr,
// return a position of 0;
// otherwise return the position of searchStr in
// sourceStr.
if (searchLen <= 0)
// LCOV_EXCL_START
return 0;
// LCOV_EXCL_STOP
Int32 position = 1;
Int32 collPosition = 1;
Int32 char_count = 1;
Int32 number_bytes;
// LCOV_EXCL_START
while (position + searchLen -1 <= sourceLen)
{
if (str_cmp(searchStr, &sourceStr[position-1], (Int32)searchLen) != 0)
if (CollationInfo::isSystemCollation(collation))
{
position += nPasses;
collPosition ++;
}
else
{
number_bytes = Attributes::getFirstCharLength
(&sourceStr[position-1], sourceLen - position + 1, cs);
if(number_bytes <= 0)
return (Lng32)-1;
++char_count;
position += number_bytes;
}
else
{
if (CollationInfo::isSystemCollation(collation))
{
return collPosition;
}
else
{
if(charOffsetFlag)
return char_count;
else
return position;
}
}
}
return 0;
// LCOV_EXCL_STOP
}
ex_expr::exp_return_type
ex_function_char_length_doublebyte::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
// Move operand's length into result.
// The data type of result is long.
*(Lng32 *)op_data[0] =
#pragma nowarn(1506) // warning elimination
(getOperand(1)->getLength(op_data[-MAX_OPERANDS+1])) >> 1;
#pragma warn(1506) // warning elimination
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ex_function_position::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
// search for operand 1
#pragma nowarn(1506) // warning elimination
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
#pragma warn(1506) // warning elimination
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
// in operand 2
#pragma nowarn(1506) // warning elimination
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
#pragma warn(1506) // warning elimination
if ( cs == CharInfo::UTF8 )
{
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
// If len1 is 0, return a position of 1.
Lng32 position;
if (len1 > 0)
{
short nPasses = CollationInfo::getCollationNPasses(getCollation());
position = findPosition(op_data[2],
len2,
op_data[1],
len1,
1,
nPasses,
getCollation(),
1,
cs);
if(position < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("POSITION FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
else
{
// if len1 <= 0, return position of 1.
position = 1;
}
// Now copy the position into result which is a long.
*(Int32 *)op_data[0] = position;
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ex_function_position_doublebyte::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
#pragma nowarn(1506) // warning elimination
Lng32 len1 = ( getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]) ) / sizeof(NAWchar);
#pragma warn(1506) // warning elimination
// If len1 is 0, return a position of 1.
Lng32 position = 1;
if (len1 > 0)
{
#pragma nowarn(1506) // warning elimination
Lng32 len2 = ( getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]) ) / sizeof(NAWchar);
#pragma warn(1506) // warning elimination
NAWchar* pat = (NAWchar*)op_data[1];
NAWchar* source = (NAWchar*)op_data[2];
// If len1 > len2 or if operand 1 is not present in operand 2, return
// a position of 0; otherwise return the position of operand 1 in
// operand 2.
short found = 0;
while (position+len1-1 <= len2 && !found)
{
if (wc_str_cmp(pat, &source[position-1], (Int32)len1))
position++;
else
found = 1;
}
if (!found) position = 0;
}
// Now copy the position into result which is a long.
*(Lng32 *)op_data[0] = position;
return ex_expr::EXPR_OK;
};
// LCOV_EXCL_START
NA_EIDPROC
static Lng32 findTokenPosition(char * sourceStr, Lng32 sourceLen,
char * searchStr, Lng32 searchLen,
short bytesPerChar)
{
// If searchLen is <= 0 or searchLen > sourceLen or
// if searchStr is not present in sourceStr,
// return a position of 0;
// otherwise return the position of searchStr in
// sourceStr.
Lng32 position = 0;
if (searchLen <= 0)
position = 0;
else
{
NABoolean found = FALSE;
position = 1;
while (position+searchLen-1 <= sourceLen && !found)
{
if (str_cmp(searchStr, &sourceStr[position-1], (Int32)searchLen) != 0)
position += bytesPerChar;
else
found = 1;
}
if (!found) position = 0;
}
return position;
}
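// Illustrative: findTokenPosition("a token b", 9, "token", 5, 1) returns 3,
// the 1-based byte position of "token" within the source string.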
ex_expr::exp_return_type ExFunctionTokenStr::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
// search for operand 1
#pragma nowarn(1506) // warning elimination
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
#pragma warn(1506) // warning elimination
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
// in operand 2
#pragma nowarn(1506) // warning elimination
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
#pragma warn(1506) // warning elimination
if ( cs == CharInfo::UTF8 )
{
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
Lng32 position;
position = findTokenPosition(op_data[2], len2, op_data[1], len1, 1);
if (position <= 0)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
Lng32 startPos = position + len1 - 1;
Lng32 i;
if (op_data[2][startPos] == '\'')
{
// find the ending single quote.
startPos++;
i = startPos;
while ((i < len2) &&
(op_data[2][i] != '\''))
i++;
}
else
{
// find the ending space character
// startPos++;
i = startPos;
while ((i < len2) &&
(op_data[2][i] != ' '))
i++;
}
/* if (op_data[2][startPos] != '\'')
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (i == len2)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
*/
// Validate the token length before copying.
if ((i - startPos) <= 0)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
str_cpy_all(op_data[0], &op_data[2][startPos], (i - startPos));
// If result is a varchar, store the length of substring
// in the varlen indicator.
if (getOperand(0)->getVCIndicatorLength() > 0)
getOperand(0)->setVarLength(i - startPos, op_data[-MAX_OPERANDS]);
else
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
};
// LCOV_EXCL_STOP
ex_expr::exp_return_type ex_function_current::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
if (getOperand())
{
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(0);
rec_datetime_field srcStartField;
rec_datetime_field srcEndField;
if (datetimeOpType->getDatetimeFields(datetimeOpType->getPrecision(),
srcStartField,
srcEndField) != 0)
{
return ex_expr::EXPR_ERROR;
}
ExpDatetime::currentTimeStamp(op_data[0],
srcStartField,
srcEndField,
datetimeOpType->getScale());
}
else
{
ExpDatetime::currentTimeStamp(op_data[0],
REC_DATE_YEAR,
REC_DATE_SECOND,
ExpDatetime::MAX_DATETIME_FRACT_PREC);
}
return ex_expr::EXPR_OK;
};
// MV,
ex_expr::exp_return_type ExFunctionGenericUpdateOutput::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// We do not set the value here.
// The return value is written into the space allocated for it by the
// executor work method.
// The value is initialized to zero here in case VSBB is rejected by the
// optimizer, so the executor will not override this value.
if (origFunctionOperType() == ITM_VSBBROWCOUNT)
*(Lng32 *)op_data[0] = 1; // Simple Insert RowCount - 1 row.
else
*(Lng32 *)op_data[0] = 0; // Simple Insert RowType is 0.
return ex_expr::EXPR_OK;
}
// Triggers -
ex_expr::exp_return_type ExFunctionInternalTimestamp::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
ex_function_current currentFun;
return (currentFun.eval(op_data, heap, diagsArea));
}
ex_expr::exp_return_type ex_function_bool::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
ex_expr::exp_return_type retcode = ex_expr::EXPR_OK;
switch (getOperType())
{
case ITM_RETURN_TRUE:
{
*(Lng32 *)op_data[0] = 1;
}
break;
case ITM_RETURN_FALSE:
{
*(Lng32 *)op_data[0] = 0;
}
break;
case ITM_RETURN_NULL:
{
*(Lng32 *)op_data[0] = -1;
}
break;
default:
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
retcode = ex_expr::EXPR_ERROR;
}
break;
}
return retcode;
}
ex_expr::exp_return_type ex_function_converttimestamp::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int64 juliantimestamp;
str_cpy_all((char *) &juliantimestamp, op_data[1], sizeof(juliantimestamp));
const Int64 minJuliantimestamp = (Int64) 1487311632 * (Int64) 100000000;
//SQ_LINUX #ifndef NA_HSC
const Int64 maxJuliantimestamp = (Int64) 2749273487LL * (Int64) 100000000 +
(Int64) 99999999;
if ((juliantimestamp < minJuliantimestamp) ||
(juliantimestamp > maxJuliantimestamp)) {
char tmpbuf[24];
memset(tmpbuf, 0, sizeof(tmpbuf) );
sprintf(tmpbuf, "%ld", juliantimestamp);
ExRaiseSqlError(heap, diagsArea, EXE_CONVERTTIMESTAMP_ERROR);
if(*diagsArea)
**diagsArea << DgString0(tmpbuf);
if(derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
short timestamp[8];
INTERPRETTIMESTAMP(juliantimestamp, timestamp);
short year = timestamp[0];
char month = (char) timestamp[1];
char day = (char) timestamp[2];
char hour = (char) timestamp[3];
char minute = (char) timestamp[4];
char second = (char) timestamp[5];
Lng32 fraction = timestamp[6] * 1000 + timestamp[7];
char *datetimeOpData = op_data[0];
str_cpy_all(datetimeOpData, (char *) &year, sizeof(year));
datetimeOpData += sizeof(year);
*datetimeOpData++ = month;
*datetimeOpData++ = day;
*datetimeOpData++ = hour;
*datetimeOpData++ = minute;
*datetimeOpData++ = second;
str_cpy_all(datetimeOpData, (char *) &fraction, sizeof(fraction));
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_dateformat::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
char *opData = op_data[1];
char *formatStr = op_data[2];
char *result = op_data[0];
if ((getDateFormat() == ExpDatetime::DATETIME_FORMAT_NUM1) ||
(getDateFormat() == ExpDatetime::DATETIME_FORMAT_NUM2))
{
// numeric to TIME conversion.
if(ExpDatetime::convNumericTimeToASCII(opData,
result,
getOperand(0)->getLength(),
getDateFormat(),
formatStr,
heap,
diagsArea) < 0) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
}
else
{
// Convert the given datetime value to an ASCII string value in the
// given format.
//
if ((DFS2REC::isAnyCharacter(getOperand(1)->getDatatype())) &&
(DFS2REC::isDateTime(getOperand(0)->getDatatype())))
{
Lng32 sourceLen = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(0);
if(datetimeOpType->convAsciiToDate(opData,
sourceLen,
result,
getOperand(0)->getLength(),
getDateFormat(),
heap,
diagsArea,
0) < 0) {
if (diagsArea && (*diagsArea) &&
(*diagsArea)->getNumber(DgSqlCode::ERROR_) == 0)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
}
return ex_expr::EXPR_ERROR;
}
}
else
{
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(1);
if(datetimeOpType->convDatetimeToASCII(opData,
result,
getOperand(0)->getLength(),
getDateFormat(),
formatStr,
heap,
diagsArea) < 0) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
}
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_dayofweek::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
Int64 interval;
short year;
char month;
char day;
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(1);
char *datetimeOpData = op_data[1];
str_cpy_all((char *) &year, datetimeOpData, sizeof(year));
datetimeOpData += sizeof(year);
month = *datetimeOpData++;
day = *datetimeOpData;
interval = datetimeOpType->getTotalDays(year, month, day);
#pragma nowarn(1506) // warning elimination
unsigned short result = (unsigned short)((interval + 1) % 7) + 1; // NT_PORT ( bd 12/9/96 ) cast to unsigned short
#pragma warn(1506) // warning elimination
str_cpy_all(op_data[0], (char *) &result, sizeof(result));
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_extract::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int64 result = 0;
if (getOperand(1)->getDatatype() == REC_DATETIME) {
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(1);
char *datetimeOpData = op_data[1];
rec_datetime_field opStartField;
rec_datetime_field opEndField;
rec_datetime_field extractStartField = getExtractField();
rec_datetime_field extractEndField = extractStartField;
if (extractStartField > REC_DATE_MAX_SINGLE_FIELD) {
extractStartField = REC_DATE_YEAR;
if (extractEndField == REC_DATE_YEARQUARTER_EXTRACT ||
extractEndField == REC_DATE_YEARMONTH_EXTRACT ||
extractEndField == REC_DATE_YEARQUARTER_D_EXTRACT ||
extractEndField == REC_DATE_YEARMONTH_D_EXTRACT)
extractEndField = REC_DATE_MONTH;
else {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
}
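// Worked example for the composite fields (see the per-case comments in
// the switch further below): for DATE '2011-08-15',
//   YEARQUARTER   = 10*2011 + (8+2)/3  = 20113
//   YEARQUARTER_D = 4*2011  + (8-1)/3  = 8046
//   YEARMONTH     = 100*2011 + 8       = 201108
//   YEARMONTH_D   = 12*2011 + 8 - 1    = 24139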
if (datetimeOpType->getDatetimeFields(datetimeOpType->getPrecision(),
opStartField,
opEndField) != 0) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
for (Int32 field = opStartField; field <= extractEndField; field++) {
switch (field) {
case REC_DATE_YEAR: {
short value;
if (field >= extractStartField && field <= extractEndField) {
str_cpy_all((char *) &value, datetimeOpData, sizeof(value));
result = value;
}
datetimeOpData += sizeof(value);
break;
}
case REC_DATE_MONTH:
case REC_DATE_DAY:
case REC_DATE_HOUR:
case REC_DATE_MINUTE:
if (field >= extractStartField && field <= extractEndField) {
switch (getExtractField())
{
case REC_DATE_YEARQUARTER_EXTRACT:
// 10*year + quarter - human readable quarter format
result = 10*result + ((*datetimeOpData)+2) / 3;
break;
case REC_DATE_YEARQUARTER_D_EXTRACT:
// 4*year + 0-based quarter - dense quarter format, better for MDAM
result = 4*result + (*datetimeOpData-1) / 3;
break;
case REC_DATE_YEARMONTH_EXTRACT:
// 100*year + month - human readable yearmonth format
result = 100*result + *datetimeOpData;
break;
case REC_DATE_YEARMONTH_D_EXTRACT:
// 12*year + 0-based month - dense month format, better for MDAM
result = 12*result + *datetimeOpData-1;
break;
default:
// regular extract of month, day, hour, minute
result = *datetimeOpData;
break;
}
}
datetimeOpData++;
break;
case REC_DATE_SECOND:
if (field == getExtractField()) {
result = *datetimeOpData;
datetimeOpData++;
short fractionPrecision = datetimeOpType->getScale();
if (fractionPrecision > 0) {
do {
result *= 10;
} while (--fractionPrecision > 0);
Lng32 fraction;
str_cpy_all((char *) &fraction, datetimeOpData, sizeof(fraction));
result += fraction;
}
}
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
}
} else {
Int64 interval;
switch (getOperand(1)->getLength()) {
case SQL_SMALL_SIZE: {
short value;
str_cpy_all((char *) &value, op_data[1], sizeof(value));
interval = value;
break;
}
case SQL_INT_SIZE: {
Lng32 value;
str_cpy_all((char *) &value, op_data[1], sizeof(value));
interval = value;
break;
}
case SQL_LARGE_SIZE:
str_cpy_all((char *) &interval, op_data[1], sizeof(interval));
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
rec_datetime_field startField;
if (ExpInterval::getIntervalStartField(getOperand(1)->getDatatype(), startField) != 0)
return ex_expr::EXPR_ERROR;
if (getExtractField() == startField)
result = interval;
else {
switch (getExtractField()) {
case REC_DATE_MONTH:
//
// The sign of the result of a modulus operation involving a negative
// operand is implementation-dependent according to the C++ reference
// manual. In this case, we prefer the result to be negative.
//
result = interval % 12;
if ((interval < 0) && (result > 0))
result = - result;
break;
case REC_DATE_HOUR:
//
// The sign of the result of a modulus operation involving a negative
// operand is implementation-dependent according to the C++ reference
// manual. In this case, we prefer the result to be negative.
//
result = interval % 24;
if ((interval < 0) && (result > 0))
result = - result;
break;
case REC_DATE_MINUTE:
//
// The sign of the result of a modulus operation involving a negative
// operand is implementation-dependent according to the C++ reference
// manual. In this case, we prefer the result to be negative.
//
result = interval % 60;
if ((interval < 0) && (result > 0))
result = - result;
break;
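// For the SECOND case below, the divisor folds in the fraction digits;
// e.g. (illustrative, assuming the usual scaled representation) an
// INTERVAL MINUTE TO SECOND(2) holding 1:23.45 is stored as 8345,
// divisor = 60*10*10 = 6000, so EXTRACT(SECOND) leaves 2345
// (23.45 at scale 2).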
case REC_DATE_SECOND: {
Lng32 divisor = 60;
for (short fp = getOperand(1)->getScale(); fp > 0; fp--)
divisor *= 10;
result = interval;
interval = result / (Int64) divisor;
result -= interval * (Int64) divisor;
break;
}
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
}
}
copyInteger (op_data[0], getOperand(0)->getLength(), &result, sizeof(result));
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_juliantimestamp::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
ex_expr::exp_return_type retcode = ex_expr::EXPR_OK;
Int64 juliantimestamp;
char *datetimeOpData = op_data[1];
short year;
char month;
char day;
char hour;
char minute;
char second;
Lng32 fraction;
str_cpy_all((char *) &year, datetimeOpData, sizeof(year));
datetimeOpData += sizeof(year);
month = *datetimeOpData++;
day = *datetimeOpData++;
hour = *datetimeOpData++;
minute = *datetimeOpData++;
second = *datetimeOpData++;
str_cpy_all((char *) &fraction, datetimeOpData, sizeof(fraction));
short timestamp[] = {
year, month, day, hour, minute, second, (short)(fraction / 1000), (short)(fraction % 1000)
};
short error;
juliantimestamp = COMPUTETIMESTAMP(timestamp, &error);
if (error) {
char tmpbuf[24];
memset(tmpbuf, 0, sizeof(tmpbuf) );
sprintf(tmpbuf, "%ld", juliantimestamp);
ExRaiseSqlError(heap, diagsArea, EXE_JULIANTIMESTAMP_ERROR);
if(*diagsArea)
**diagsArea << DgString0(tmpbuf);
if(derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
str_cpy_all(op_data[0], (char *) &juliantimestamp, sizeof(juliantimestamp));
return retcode;
}
ex_expr::exp_return_type ex_function_exec_count::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
execCount_++;
str_cpy_all(op_data[0], (char *) &execCount_, sizeof(execCount_));
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_curr_transid::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
// this function is not used yet anywhere, whoever wants to start using
// it should fill in the missing code here
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
// -----------------------------------------------------------------------
// Helper function for CURRENT_USER and SESSION_USER function.
// Used by exp and UDR code to get the CURRENT_USER and SESSION_USER
// information. SESSION_USER is the user that is logged on to the
// current SQL session. CURRENT_USER is the one with whose privileges
// a SQL statement is executed, With Definer Rights SPJ, the CURRENT_USER is
// the owner of the SPJ while SESSION_USER is the user who invoked the SPJ.
//
// Returns the current login user name as a C-style string (null terminated)
// in inputUserNameBuffer parameter.
// (e.g. returns "Domain1\Administrator" on NT if logged
// in as Domain1\Administrator,
// "role-mgr" on NSK if logged in as alias "role-mgr",
// "ROLE.MGR" on NSK if logged in as Guardian userid ROLE.MGR)
// Returns FEOK as the return value on success, otherwise returns an error status.
// Returns FEBUFTOOSMALL if the input buffer supplied is not big enough to
// accommodate the actual user name.
// Optionally returns the actual length of the user name (in bytes) in
// actualLength parameter. Returns 0 as the actual length if the function returns
// an error code, except for FEBUFTOOSMALL return code in which case it
// returns the actual length so that the caller can get an idea of the minimum
// size of the input buffer to be provided.
// -----------------------------------------------------------------------
short exp_function_get_user(
OperatorTypeEnum userType, // IN - CURRENT_USER or SESSION_USER
char *inputUserNameBuffer, // IN - buffer for returning the user name
Lng32 inputBufferLength, // IN - length(max) of the above buffer in bytes
Lng32 *actualLength) // OUT optional - actual length of the user name
{
if (actualLength)
*actualLength = 0;
short result = FEOK;
Int32 lActualLen = 0;
#if !defined (__EID)
Int32 userID;
if (userType == ITM_SESSION_USER)
userID = ComUser::getSessionUser();
else
// Default to CURRENT_USER
userID = ComUser::getCurrentUser();
assert (userID != NA_UserIdDefault);
char userName[MAX_USERNAME_LEN+1];
Int16 status = ComUser::getUserNameFromUserID( (Int32) userID
, (char *)&userName
, (Int32) inputBufferLength
, lActualLen );
if (status == FEOK)
{
str_cpy_all(inputUserNameBuffer, userName, lActualLen);
inputUserNameBuffer[lActualLen] = '\0';
}
else
result = FEBUFTOOSMALL;
#endif
if (((result == FEOK) || (result == FEBUFTOOSMALL)) && actualLength)
*actualLength = lActualLen;
return result;
}
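// Illustrative call (using only the declarations above):
//   char name[ComSqlId::MAX_LDAP_USER_NAME_LEN + 1];
//   Lng32 nameLen = 0;
//   short fe = exp_function_get_user(ITM_SESSION_USER, name,
//                                    sizeof(name), &nameLen);
//   if (fe == FEOK) { ... name now holds the NUL-terminated session user ... }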
ex_expr::exp_return_type ex_function_ansi_user::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
const Lng32 MAX_USER_NAME_LEN = ComSqlId::MAX_LDAP_USER_NAME_LEN;
char username[MAX_USER_NAME_LEN + 1];
Lng32 username_len = 0;
short retcode = FEOK;
retcode = exp_function_get_user ( getOperType(),
username,
MAX_USER_NAME_LEN + 1,
&username_len
);
if (((retcode != FEOK) && (retcode != FENOTFOUND)) ||
((retcode == FEOK) && (username_len == 0)) ) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
str_cpy_all(op_data[0], username, username_len);
getOperand(0)->setVarLength(username_len, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_user::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int32 userIDLen = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
Int64 id64;
switch (userIDLen)
{
case SQL_SMALL_SIZE:
id64 = *((short *) op_data[1]);
break;
case SQL_INT_SIZE:
id64 = *((Lng32 *) op_data[1]);
break;
case SQL_LARGE_SIZE:
id64 = *((Int64 *) op_data[1]);
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (id64 < -SQL_INT32_MAX || id64 > SQL_INT32_MAX)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
Int32 authID = (Int32)(id64);
// *****************************************************************************
// * *
// * Code to handle functions AUTHNAME and AUTHTYPE. Piggybacked on USER *
// * function code in parser, binder, and optimizer. Perhaps there is a *
// * better way to implement. *
// * *
// * AUTHNAME invokes the porting layer, which calls CLI, as it may be *
// * necessary to read metadata (and therefore have a transaction within *
// * a transaction). *
// * *
// * AUTHTYPE calls Catman directly, as Catman knows the values and ranges *
// * for various types of authentication ID values. Examples include *
// * PUBLIC, SYSTEM, users, and roles. AUTHTYPE returns a single character *
// * that can be used within a case, if, or where clause. *
// * *
// *****************************************************************************
switch (getOperType())
{
case ITM_AUTHNAME:
{
Int16 result;
Int32 authNameLen = 0;
char authName[MAX_AUTHNAME_LEN + 1];
result = ComUser::getAuthNameFromAuthID(authID,
authName,
sizeof(authName),
authNameLen);
if (result != FEOK)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (authNameLen > getOperand(0)->getLength())
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
getOperand(0)->setVarLength(authNameLen, op_data[-MAX_OPERANDS]);
str_cpy_all(op_data[0], authName, authNameLen);
return ex_expr::EXPR_OK;
}
case ITM_AUTHTYPE:
{
char authType[2];
authType[1] = 0;
authType[0] = ComUser::getAuthType(authID);
getOperand(0)->setVarLength(1, op_data[-MAX_OPERANDS]);
str_cpy_all(op_data[0], authType, 1);
return ex_expr::EXPR_OK;
}
case ITM_USER:
case ITM_USERID:
default:
{
// Drop down to user code
}
}
Int32 userNameLen = 0;
char userName[MAX_USERNAME_LEN+1];
Int16 result = ComUser::getUserNameFromUserID(authID,
(char *)&userName,
MAX_USERNAME_LEN+1,
userNameLen);
if ((result != FEOK) && (result != FENOTFOUND))
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
else if (result == FENOTFOUND || userNameLen == 0)
{
// set the user name same as user id
// avoids exceptions if userID not present in USERS table
if (authID < 0)
{
userName[0] = '-';
str_itoa((ULng32)(-authID), &userName[1]);
}
else
{
str_itoa((ULng32)(authID), userName);
}
userNameLen = str_len(userName);
}
if (userNameLen > getOperand(0)->getLength())
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
getOperand(0)->setVarLength(userNameLen, op_data[-MAX_OPERANDS]);
str_cpy_all(op_data[0], userName, userNameLen);
return ex_expr::EXPR_OK;
};
////////////////////////////////////////////////////////////////////
//
// encodeKeyValue
//
// This routine encodes key values so that they can be sorted simply
// using binary collation. The routine is called by the executor.
//
// Note: The target MAY point to the source to change the original
// value.
//
////////////////////////////////////////////////////////////////////
void ex_function_encode::encodeKeyValue(Attributes * attr,
const char *source,
const char *varlenPtr,
char *target,
NABoolean isCaseInsensitive,
Attributes * tgtAttr,
char *tgt_varlen_ptr,
const Int32 tgtLength ,
CharInfo::Collation collation,
CollationInfo::CollationType collType)
{
Lng32 fsDatatype = attr->getDatatype();
Lng32 length = attr->getLength();
Lng32 precision = attr->getPrecision();
switch (fsDatatype) {
#if defined( NA_LITTLE_ENDIAN )
case REC_BIN8_SIGNED:
//
// Flip the sign bit.
//
*(UInt8*)target = *(UInt8*)source;
target[0] ^= 0200;
break;
case REC_BIN8_UNSIGNED:
case REC_BOOLEAN:
*(UInt8*)target = *(UInt8*)source;
break;
case REC_BIN16_SIGNED:
//
// Flip the sign bit.
//
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
target[0] ^= 0200;
break;
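// e.g. the 16-bit value 0x0102 is stored little-endian as bytes 02 01;
// reversebytes yields bytes 01 02, and the sign-bit flip gives 81 02,
// so the encoded keys compare correctly with a plain byte-wise memcmp.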
case REC_BPINT_UNSIGNED:
case REC_BIN16_UNSIGNED:
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
break;
case REC_BIN32_SIGNED:
//
// Flip the sign bit.
//
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
target[0] ^= 0200;
break;
case REC_BIN32_UNSIGNED:
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
break;
case REC_BIN64_SIGNED:
//
// Flip the sign bit.
//
*((_int64 *) target) = reversebytes( *((_int64 *) source) );
target[0] ^= 0200;
break;
case REC_BIN64_UNSIGNED:
*((UInt64 *) target) = reversebytes( *((UInt64 *) source) );
break;
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND:
switch(length)
{
case 2: // Signed 16 bit
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
break;
case 4: // Signed 32 bit
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
break;
case 8: // Signed 64 bit
*((_int64 *) target) = reversebytes( *((_int64 *) source) );
break;
default:
assert(FALSE);
break;
}; // switch(length)
target[0] ^= 0200;
break;
case REC_DATETIME: {
// This method has been modified as part of the MP Datetime
// Compatibility project. It has been made more generic so that
// it depends only on the start and end fields of the datetime type.
//
rec_datetime_field startField;
rec_datetime_field endField;
ExpDatetime *dtAttr = (ExpDatetime *)attr;
// Get the start and end fields for this Datetime type.
//
dtAttr->getDatetimeFields(dtAttr->getPrecision(),
startField,
endField);
// Copy all of the source to the destination, then reverse only
// those fields of the target that are longer than 1 byte
//
if (target != source)
str_cpy_all(target, source, length);
// Reverse the YEAR and Fractional precision fields if present.
//
char *ptr = target;
for(Int32 field = startField; field <= endField; field++) {
switch (field) {
case REC_DATE_YEAR:
// convert YYYY from little endian to big endian
//
*((unsigned short *) ptr) = reversebytes( *((unsigned short *) ptr) );
ptr += sizeof(short);
break;
case REC_DATE_MONTH:
case REC_DATE_DAY:
case REC_DATE_HOUR:
case REC_DATE_MINUTE:
// One byte fields are copied as is...
ptr++;
break;
case REC_DATE_SECOND:
ptr++;
// if there is a fraction, make it big endian
// (it is an unsigned long, beginning after the SECOND field)
//
if (dtAttr->getScale() > 0)
*((ULng32 *) ptr) = reversebytes( *((ULng32 *) ptr) );
break;
}
}
break;
}
#else
case REC_BIN8_SIGNED:
case REC_BIN16_SIGNED:
case REC_BIN32_SIGNED:
case REC_BIN64_SIGNED:
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND:
//
// Flip the sign bit.
//
if (target != source)
str_cpy_all(target, source, length);
target[0] ^= 0200;
break;
#endif
case REC_DECIMAL_LSE:
//
// If the number is negative, complement all the bytes. Otherwise, set
// the sign bit.
//
if (source[0] & 0200) {
for (Lng32 i = 0; i < length; i++)
#pragma nowarn(1506) // warning elimination
target[i] = ~source[i];
#pragma warn(1506) // warning elimination
} else {
if (target != source)
str_cpy_all(target, source, length);
target[0] |= 0200;
}
break;
case REC_NUM_BIG_UNSIGNED: {
BigNum type(length, precision, 0, 1);
type.encode(source, target);
break;
}
case REC_NUM_BIG_SIGNED: {
BigNum type(length, precision, 0, 0);
type.encode(source, target);
break;
}
case REC_IEEE_FLOAT32: {
//
// unencoded float (IEEE 754 - 1985 standard):
//
// +-+----------+---------------------+
// | | exponent | mantissa |
// | | (8 bits) | (23 bits) |
// +-+----------+---------------------+
// |
// +- Sign bit
//
// Encoded float (IEEE 754 - 1985 standard):
//
// +-+--------+-----------------------+
// | |Exponent| Mantissa |
// | |(8 bits)| (23 bits) |
// +-+--------+-----------------------+
// || |
// |+- Complemented if sign was neg.-+
// |
// +- Sign bit complement
//
// the following code is independent of the "endianness" of the
// architecture. Instead, it assumes the IEEE 754 - 1985 standard
// for representation of floats
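// e.g. +1.0f (0x3F800000) is positive, so only the sign bit is
// complemented, giving 0xBF800000; -1.0f (0xBF800000) is negative, so
// all bits flip, giving 0x407FFFFF (before the endian-dependent byte
// swap below). Every negative then sorts below every positive under
// unsigned byte comparison.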
// source may not be aligned, move it to a temp var.
float floatsource;
str_cpy_all((char*)&floatsource, source, length);
ULng32 *dblword = (ULng32 *) &floatsource;
if (floatsource < 0) // the sign is negative,
*dblword = ~*dblword; // flip all the bits
else
floatsource = -floatsource; // complement the sign bit
// here comes the dependent part
#ifdef NA_LITTLE_ENDIAN
*(ULng32 *) target = reversebytes(*dblword);
#else
// *(unsigned long *) target = *dblword;
str_cpy_all(target, (char*)&floatsource, length);
#endif
break;
}
case REC_IEEE_FLOAT64: {
//
// unencoded double (IEEE 754 - 1985 standard):
//
// +-+--------- -+--------------------+
// | | exponent | mantissa |
// | | (11 bits) | (52 bits) |
// +-+--------- -+--------------------+
// |
// +- Sign bit
//
// Encoded double (IEEE 754 - 1985 standard):
//
// +-+-----------+--------------------+
// | | Exponent | Mantissa |
// | | (11 bits) | (52 bits) |
// +-+-----------+--------------------+
// || |
// |+- Complemented if sign was neg.-+
// |
// +- Sign bit complement
//
    // the following code is independent of the "endianness" of the
    // architecture. Instead, it assumes IEEE 754 - 1985 standard
// for representation of floats
//double doublesource = *(double *) source;
// source may not be aligned, move it to a temp var.
double doublesource;
str_cpy_all((char*)&doublesource, source, length);
Int64 *quadword = (Int64 *) &doublesource;
if (doublesource < 0) // the sign is negative,
*quadword = ~*quadword; // flip all the bits
else
doublesource = -doublesource; // complement the sign bit
// here comes the dependent part
#ifdef NA_LITTLE_ENDIAN
*(Int64 *) target = reversebytes(*quadword);
#else
// *(Int64 *) target = *quadword;
str_cpy_all(target, (char*)&doublesource, length);
#endif
break;
}
// LCOV_EXCL_START
case REC_BYTE_F_ASCII: {
if (CollationInfo::isSystemCollation(collation ))
{
Int16 nPasses = CollationInfo::getCollationNPasses(collation);
if (collType == CollationInfo::Sort ||
collType == CollationInfo::Compare)
{
encodeCollationKey(
(const UInt8 *)source,
length,
(UInt8 *)target,
tgtLength,
nPasses,
collation,
TRUE);
}
// LCOV_EXCL_STOP
else //search
{
Int32 effEncodedKeyLength = 0;
encodeCollationSearchKey(
(const UInt8 *)source,
length,
(UInt8 *)target,
tgtLength,
effEncodedKeyLength,
nPasses,
collation,
TRUE);
assert(tgtAttr && tgt_varlen_ptr);
tgtAttr->setVarLength(effEncodedKeyLength, tgt_varlen_ptr);
}
}
else
{
//------------------------------------------
if (target != source)
str_cpy_all(target, source, length);
if (isCaseInsensitive)
{
// upcase target
for (Int32 i = 0; i < length; i++)
{
target[i] = TOUPPER(source[i]);
}
}
//--------------------------
}
}
break;
case REC_BYTE_V_ASCII:
case REC_BYTE_V_ASCII_LONG:
{
Int32 vc_len = attr->getLength(varlenPtr);
if (CollationInfo::isSystemCollation(collation))
{
// LCOV_EXCL_START
Int16 nPasses = CollationInfo::getCollationNPasses(collation);
NABoolean rmTspaces = getRmTSpaces(collation);
if (collType == CollationInfo::Sort ||
collType == CollationInfo::Compare)
{
encodeCollationKey(
(UInt8 *)source,
(Int16)vc_len,
(UInt8 *)target,
tgtLength,
nPasses,
collation,
rmTspaces);
}
else
{
Int32 effEncodedKeyLength = 0;
encodeCollationSearchKey(
(UInt8 *)source,
(Int16)vc_len,
(UInt8 *)target,
tgtLength,
effEncodedKeyLength,
nPasses,
collation,
rmTspaces);
assert(tgtAttr && tgt_varlen_ptr);
tgtAttr->setVarLength(effEncodedKeyLength, tgt_varlen_ptr);
}
}
else
// LCOV_EXCL_STOP
{
//
// Copy the source to the target.
//
if (!isCaseInsensitive)
str_cpy_all(target, source, vc_len);
else
{
// upcase target
for (Int32 i = 0; i < vc_len; i++)
{
target[i] = TOUPPER(source[i]);
}
}
//
// Blankpad the target (if needed).
//
if (vc_len < length)
str_pad(&target[vc_len],
(Int32) (length - vc_len), ' ');
}
}
break;
// added for Unicode data type.
case REC_NCHAR_V_UNICODE:
{
Int32 vc_len = attr->getLength(varlenPtr);
//
// Copy the source to the target.
//
str_cpy_all(target, source, vc_len);
//
// Blankpad the target (if needed).
//
if (vc_len < length)
wc_str_pad((NAWchar*)&target[vc_len],
(Int32) (length - vc_len)/sizeof(NAWchar), unicode_char_set::space_char());
#if defined( NA_LITTLE_ENDIAN )
wc_swap_bytes((NAWchar*)target, length/sizeof(NAWchar));
#endif
break;
}
// added for Unicode data type.
case REC_NCHAR_F_UNICODE:
{
if (target != source)
str_cpy_all(target, source, length);
#if defined( NA_LITTLE_ENDIAN )
wc_swap_bytes((NAWchar*)target, length/sizeof(NAWchar));
#endif
break;
}
case REC_BYTE_V_ANSI:
{
      short vc_len;
      vc_len = (short) strlen(source);
//
// Copy the source to the target.
//
str_cpy_all(target, source, vc_len);
//
// Blankpad the target (if needed).
//
if (vc_len < length)
str_pad(&target[vc_len], (Int32) (length - vc_len), ' ');
}
break;
default:
//
// Encoding is not needed. Just copy the source to the target.
//
if (target != source)
str_cpy_all(target, source, length);
break;
}
}
////////////////////////////////////////////////////////////////////
// class ex_function_encode
////////////////////////////////////////////////////////////////////
ex_function_encode::ex_function_encode(){};
// LCOV_EXCL_START
ex_function_encode::ex_function_encode(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
short descFlag)
: ex_function_clause(oper_type, 2, attr, space),
flags_(0),
collation_((Int16) CharInfo::DefaultCollation)
{
if (descFlag)
setIsDesc(TRUE);
else
setIsDesc(FALSE);
setCollEncodingType(CollationInfo::Sort);
};
// LCOV_EXCL_STOP
ex_function_encode::ex_function_encode(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
CharInfo::Collation collation,
short descFlag,
CollationInfo::CollationType collType)
: ex_function_clause(oper_type, 2, attr, space),
flags_(0),
collation_((Int16)collation)
{
if (descFlag)
setIsDesc(TRUE);
else
setIsDesc(FALSE);
setCollEncodingType(collType);
};
ex_expr::exp_return_type ex_function_encode::processNulls(
char * op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
if ((CollationInfo::isSystemCollation((CharInfo::Collation) collation_)) &&
getCollEncodingType() != CollationInfo::Sort)
{
return ex_clause::processNulls(op_data,heap,diagsArea);
}
else if (regularNullability())
{
return ex_clause::processNulls(op_data,heap,diagsArea);
}
// if value is missing,
// then move max or min value to result.
if (getOperand(1)->getNullFlag() &&
(!op_data[1])) // missing value (is a null value)
{
if (NOT isDesc())
{
// NULLs sort high for ascending comparison.
// Pad result with highest value.
// For SQL/MP tables, DP2 expects missing value columns to be
// 0 padded after the null-indicator.
str_pad(op_data[2 * MAX_OPERANDS],
(Int32)getOperand(0)->getStorageLength(), '\0');
str_pad(op_data[2 * MAX_OPERANDS],
ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH,
'\377');
}
else
{
// NULLs sort low for descending comparison.
// Pad result with lowest value.
str_pad(op_data[2 * MAX_OPERANDS],
(Int32)getOperand(0)->getStorageLength(),
'\377');
str_pad(op_data[2 * MAX_OPERANDS],
ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH,
'\0');
}
return ex_expr::EXPR_NULL;
}
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ex_function_encode::evalDecode(char *op_data[],
CollHeap* heap)
{
char * result = op_data[0];
Attributes *srcOp = getOperand(1);
decodeKeyValue(srcOp,
isDesc(),
op_data[1],
op_data[-MAX_OPERANDS+1],
result,
op_data[-MAX_OPERANDS],
FALSE);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_encode::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea**)
{
if (isDecode())
{
return evalDecode(op_data, heap);
}
Int16 prependedLength = 0;
char * result = op_data[0];
Attributes *tgtOp = getOperand(0);
Attributes *srcOp = getOperand(1);
if ((srcOp->getNullFlag()) && // nullable
(NOT regularNullability()))
{
// If target is aligned format then can't use the 2 byte null here ...
assert( !tgtOp->isSQLMXAlignedFormat() );
// if sort is set for char types with collations (including default)
if (getCollEncodingType() == CollationInfo::Sort)
{
// value cannot be null in this proc. That is handled in process_nulls.
str_pad(result, ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH, '\0');
result += ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH;
prependedLength = ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH;
}
}
if (srcOp->isComplexType())
((ComplexType *)srcOp)->encode(op_data[1], result, isDesc());
else
{
Int32 tgtLength = tgtOp->getLength() - prependedLength ;
encodeKeyValue(srcOp,
op_data[1],
op_data[-MAX_OPERANDS+1],
result,
caseInsensitive(),
tgtOp,
op_data[-MAX_OPERANDS],
tgtLength,
(CharInfo::Collation) collation_,
getCollEncodingType());
}
if (isDesc())
{
      // complement all bytes
for (Lng32 k = 0; k < tgtOp->getLength(); k++)
op_data[0][k] = (char)(~(op_data[0][k]));
}
return ex_expr::EXPR_OK;
}
void ex_function_encode::getCollationWeight(
CharInfo::Collation collation,
Int16 pass,
UInt16 chr,
UInt8 * weightStr,
Int16 & weightStrLen)
{
UChar wght = getCollationWeight(collation, pass, chr);
switch (collation)
{
case CharInfo::CZECH_COLLATION:
case CharInfo::CZECH_COLLATION_CI:
{
if ((CollationInfo::Pass)pass != CollationInfo::SecondPass)
{
if (wght > 0 )
{
weightStr[0] = wght;
weightStrLen = 1;
}
else
{
weightStrLen = 0;
}
}
else
{
if (getCollationWeight(collation, CollationInfo::FirstPass, chr) > 0 )
{
weightStr[0] = wght;
weightStrLen = 1;
}
else
{
weightStr[0] = 0;
weightStr[1] = wght;
weightStrLen = 2;
}
}
}
break;
default:
{
if (wght > 0 )
{
weightStr[0] = wght;
weightStrLen = 1;
}
else
{
weightStrLen = 0;
}
}
}
}
// LCOV_EXCL_START
unsigned char ex_function_encode::getCollationWeight(
CharInfo::Collation collation,
Int16 pass,
UInt16 chr)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].weightTable[pass][chr];
}
Int16 ex_function_encode::getNumberOfDigraphs( const CharInfo::Collation collation)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].numberOfDigraphs ;
}
UInt8 * ex_function_encode::getDigraph(const CharInfo::Collation collation, const Int32 digraphNum)
{
return (UInt8 *) collParams[CollationInfo::getCollationParamsIndex(collation)].digraphs[digraphNum] ;
}
Int16 ex_function_encode::getDigraphIndex(const CharInfo::Collation collation, const Int32 digraphNum)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].digraphIdx[digraphNum];
}
NABoolean ex_function_encode::getRmTSpaces(const CharInfo::Collation collation)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].rmTSpaces;
}
NABoolean ex_function_encode::getNumberOfChars(const CharInfo::Collation collation)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].numberOfChars;
}
NABoolean ex_function_encode::isOneToOneCollation(const CharInfo::Collation collation)
{
for (UInt16 i =0 ; i < getNumberOfChars(collation); i++)
{
for (UInt16 j =i +1 ; j < getNumberOfChars(collation); j++)
{
      // Two distinct characters must differ in at least one pass;
      // otherwise the collation maps them to the same encoded key.
      NABoolean weightsDiffer = FALSE;
      for (Int16 pass=0 ; pass < CollationInfo::getCollationNPasses(collation); pass++)
      {
        if (getCollationWeight(collation,pass,i) != getCollationWeight(collation,pass,j) )
        {
          weightsDiffer = TRUE;
        }
      }
      if (!weightsDiffer)
      {
        return FALSE;
      }
}
}
return TRUE;
}
void ex_function_encode::encodeCollationKey(const UInt8 * src,
Int32 srcLength,
UInt8 * encodeKey,
const Int32 encodedKeyLength,
Int16 nPasses,
CharInfo::Collation collation,
NABoolean rmTSpaces )
{
assert (CollationInfo::isSystemCollation(collation));
UInt8 * ptr;
if (src[0] == CollationInfo::getCollationMaxChar(collation))
{
str_pad((char*) encodeKey, srcLength, CollationInfo::getCollationMaxChar(collation));
if (str_cmp((char*)src, (char*)encodeKey, srcLength) == 0)
{
str_pad((char*) encodeKey, encodedKeyLength,'\377' );
return;
}
}
if (src[0] == '\0')
{
str_pad((char*) encodeKey, encodedKeyLength, '\0');
if (str_cmp((char*)src, (char*)encodeKey,srcLength) == 0)
{
return;
}
}
Int16 charNum=0;
NABoolean hasDigraphs = FALSE;
Int32 trailingSpaceLength =0;
UInt8 digraph[2];
digraph[0]=digraph[1]=0;
Int16 weightStrLength=0;
ptr= encodeKey;
/////////////////////////////////////////////
for ( Int32 i = srcLength -1 ; rmTSpaces && i> 0 && src[i]== 0x20; i--)
{
trailingSpaceLength++;
}
for (short i= CollationInfo::FirstPass; i< nPasses; i++)
{
if (i != CollationInfo::FirstPass)
{
*ptr++= 0x0;
}
if ((i == CollationInfo::FirstPass) ||
(i != CollationInfo::FirstPass && hasDigraphs))
{
      // loop through the chars in the string, find digraphs and assign weights
for (Int32 srcIdx = 0; srcIdx < srcLength- trailingSpaceLength; srcIdx++)
{
digraph[0] = digraph[1];
digraph[1] = src[srcIdx];
NABoolean digraphFound = FALSE;
for (Int32 j = 0 ; j < getNumberOfDigraphs(collation); j++)
{
if (digraph[0] == getDigraph(collation, j)[0] &&
digraph[1] == getDigraph(collation, j)[1])
{
digraphFound = hasDigraphs = TRUE;
charNum = getDigraphIndex(collation,j);
ptr--;
break;
}
}
if (!digraphFound)
{
charNum = src[srcIdx];
}
getCollationWeight(collation,i, charNum,ptr,weightStrLength);
ptr = ptr + weightStrLength;
}
}
else
{
for (Int32 srcIdx = 0; srcIdx < srcLength- trailingSpaceLength; srcIdx++)
{
charNum = src[srcIdx];
getCollationWeight(collation, i, charNum,ptr,weightStrLength);
ptr = ptr + weightStrLength;
}
}
}
str_pad( (char *) ptr,(encodeKey - ptr) + encodedKeyLength, '\0');
} // ex_function_encode::encodeCollationKey
void ex_function_encode::encodeCollationSearchKey(const UInt8 * src,
Int32 srcLength,
UInt8 * encodeKey,
const Int32 encodedKeyLength,
Int32 & effEncodedKeyLength,
Int16 nPasses,
CharInfo::Collation collation,
NABoolean rmTSpaces )
{
assert (CollationInfo::isSystemCollation(collation));
UInt8 * ptr;
Int16 charNum=0;
NABoolean hasDigraphs = FALSE;
Int32 trailingSpaceLength =0;
UInt8 digraph[2];
digraph[0]=digraph[1]=0;
ptr= encodeKey;
/////////////////////////////////////////////
for ( Int32 i = srcLength -1 ; rmTSpaces && i> 0 && src[i]== 0x20; i--)
{
trailingSpaceLength++;
}
for (Int32 srcIdx = 0; srcIdx < srcLength- trailingSpaceLength; srcIdx++)
{
digraph[0] = digraph[1];
digraph[1] = src[srcIdx];
NABoolean digraphFound = FALSE;
for (Int32 j = 0 ; j < getNumberOfDigraphs(collation); j++)
{
if (digraph[0] == getDigraph(collation, j)[0] &&
digraph[1] == getDigraph(collation, j)[1])
{
digraphFound = hasDigraphs = TRUE;
charNum = getDigraphIndex(collation,j);
ptr = ptr - nPasses;
break;
}
}
if (!digraphFound)
{
charNum = src[srcIdx];
}
//don't include ignorable characters
short ignorable = 0;
for (short np = 0; np < nPasses ; np++)
{
ptr[np]= getCollationWeight(collation, np, charNum);
if (ptr[np] == '\0')
{
ignorable++;
}
}
    if (ignorable != nPasses)
{
ptr = ptr + nPasses;
}
if (digraphFound &&
ignorable != nPasses)
{
for (short np = CollationInfo::FirstPass; np < nPasses ; np++)
{
ptr[np]= '\0';
}
ptr = ptr + nPasses;
}
}
effEncodedKeyLength = ptr - encodeKey ;
str_pad( (char *) ptr,(encodeKey - ptr) + encodedKeyLength, '\0');
} // ex_function_encode::encodeCollationSearchKey
// LCOV_EXCL_STOP
////////////////////////////////////////////////////////////////////////
// class ex_function_explode_varchar
////////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ex_function_explode_varchar::processNulls(
char * op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
Attributes *tgt = getOperand(0);
if (getOperand(1)->getNullFlag() && (!op_data[1])) // missing value (is a null value)
{
if (tgt->getNullFlag()) // if result is nullable
{
// move null value to result
ExpTupleDesc::setNullValue( op_data[0],
tgt->getNullBitIndex(),
tgt->getTupleFormat() );
if (forInsert_)
{
// move 0 to length bytes
tgt->setVarLength(0, op_data[MAX_OPERANDS]);
} // for Insert
else
{
// move maxLength to result length bytes
tgt->setVarLength(tgt->getLength(), op_data[MAX_OPERANDS]);
}
return ex_expr::EXPR_NULL; // indicate that a null input was processed
}
else
{
// Attempt to put NULL into column with NOT NULL NONDROPPABLE constraint.
ExRaiseFunctionSqlError(heap, diagsArea, EXE_ASSIGNING_NULL_TO_NOT_NULL,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
} // source is a null value
// first operand is not null -- set null indicator in result if needed
if (tgt->getNullFlag())
{
ExpTupleDesc::clearNullValue( op_data[0],
tgt->getNullBitIndex(),
tgt->getTupleFormat() );
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_explode_varchar::eval(char *op_data[],
CollHeap*heap,
ComDiagsArea**diagsArea)
{
if (forInsert_)
{
// move source to target. No blankpadding.
#pragma nowarn(1506) // warning elimination
return convDoIt(op_data[1],
getOperand(1)->getLength(op_data[-MAX_OPERANDS + 1]),
getOperand(1)->getDatatype(),
getOperand(1)->getPrecision(),
getOperand(1)->getScale(),
op_data[0],
getOperand(0)->getLength(),
getOperand(0)->getDatatype(),
getOperand(0)->getPrecision(),
getOperand(0)->getScale(),
op_data[-MAX_OPERANDS],
getOperand(0)->getVCIndicatorLength(),
heap,
diagsArea);
#pragma warn(1506) // warning elimination
}
else
{
// move source to target. Blankpad target to maxLength.
#pragma nowarn(1506) // warning elimination
if (convDoIt(op_data[1],
getOperand(1)->getLength(op_data[-MAX_OPERANDS + 1]),
getOperand(0)->getDatatype(),
getOperand(1)->getPrecision(),
getOperand(1)->getScale(),
op_data[0],
getOperand(0)->getLength(),
REC_BYTE_F_ASCII,
getOperand(0)->getPrecision(),
getOperand(0)->getScale(),
NULL,
0,
heap,
diagsArea))
return ex_expr::EXPR_ERROR;
#pragma warn(1506) // warning elimination
// Move max length to length bytes of target.
getOperand(0)->setVarLength(getOperand(0)->getLength(),
op_data[-MAX_OPERANDS]);
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ex_function_hash
////////////////////////////////////////////////////////////////////
ULng32 ex_function_hash::HashHash(ULng32 inValue) {
// Hashhash -
// input : inValue - double word to be hashed
// output : 30-bit hash values uniformly distributed (mod s) for
// any s < 2**30
// This algorithm creates near-uniform output for arbitrarily distributed
// input by selecting for each fw of the key a quasi-random universal
// hash function from the class of linear functions ax + b (mod p)
// over the field of integers modulo the prime 2**31-1. The output is at
// least comparable in quality to cubics of the form
// ax**3 + bx**2 + cx + d (mod p), and is considerably closer to true
// uniformity than a single linear function chosen once per execution.
// The latter preserve the uniform 2nd central moment of bucket totals,
// and the former the 4th central moment. For probabilistic counting
// applications, the theoretical standard error cannot be achieved with
// less than cubic polynomials, but the present algorithm is approx 3-5x
// in speed. (Cf. histogram doc. for bibliography, but especially:
  //      Carter and Wegman, "Universal Classes of Hash Functions",
// Journ. Comp. Sys. Sci., 18: April 1979, pp. 145-154
// 22: 1981, pp. 265-279
// Dietzfelbinger, et al., "Polynomial Hash Functions...",
// ICALP '92, pp. 235-246. )
// N.B. - For modular arithmetic the 64-bit product of two 32-bit
// operands must be reduced (mod p). The high-order 32 bits are available
// in hardware but not necessarily through C syntax.
// Two additional optimizations should be noted:
// 1. Instead of processing 3-byte operands, as would be required with
// universal hashing over the field 2**31-1, with alignment delays, we
// process fullwords, and choose distinct 'random' coefficients for
// 2 keys congruent (mod p) using a 32-bit function, and then proceed
// with modular linear hashing over the smaller field.
// 2. For p = 2**c -1 for any c, shifts, and's and or's can be substituted
// for division, as recommended by Carter and Wegman. In addition, the
// output distribution is unaffected (i.e. with probability
// < 1/(2**31-1) if we omit tests for 0 (mod p).
// To reduce a (mod p), create k1 and k2 (<= p) with a = (2**31)k1 + k2,
// and reduce again to (2**31)k3 + k4, where k4 < 2**31 and k3 = 0 or 1.
// Multi-word keys:
// If k = k1||...||kn we compute the quasi-random coefficients c & d using
// ki, but take h(ki) = c*(ki xor h(ki-1)) + d, where h(k0) = 0, and use
// H(k) = h(kn). This precludes the commutative anomaly
// H(k || k') = H(k' || k)
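  // Example of the shift/mask reduction (mod p = 2**31-1) used below:
  // for a 32-bit a, ((a & 0x80000000) >> 31) + (a & 0x7fffffff) folds the
  // high bit back in, since 2**31 == 1 (mod 2**31-1). One fold leaves a
  // value <= p, which is close enough for hashing purposes.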
register ULng32 u, v, c, d, k0;
ULng32 a1, a2, b1, b2;
//SQ_LINUX #ifndef NA_HSC
ULng32 c1 = (ULng32)5233452345LL;
ULng32 c2 = (ULng32)8578458478LL;
ULng32 d1 = 1862598173LL;
ULng32 d2 = 3542657857LL;
ULng32 hashValue = 0;
ULng32 k = inValue;
u = (c1 >> 16) * (k >> 16);
v = c1 * k;
c = u ^ v ^ c2;
u = (d1 >> 16) * (k >> 16);
v = d1 * k;
d = u ^ v ^ d2;
c = ((c & 0x80000000) >> 31) + (c & 0x7fffffff);
d = ((d & 0x80000000) >> 31) + (d & 0x7fffffff);
/* compute hash value 1 */
k0 = hashValue ^ k;
/*hmul(c,k0);
u=u0; v=v0;*/
a1 = c >> 16;
a2 = c & 0xffff;
b1 = k0 >> 16;
b2 = k0 & 0xffff;
v = (((a1 * b2) & 0xffff) + ((b1 * a2) & 0xffff));
u = a1 * b1 + (((a1 * b2) >> 16) + ((b1 * a2) >> 16))
+ ((v & 0x10000) >> 16);
v = c * k0;
if (v < (a2 * b2))
u++;
u = u << 1;
u = ((v & 0x80000000) >> 31) | u;
v = v & 0x7fffffff;
v = u + v;
v = ((v & 0x80000000) >> 31) + (v & 0x7fffffff);
/*v = ((v & 0x80000000) >> 31) + (v & 0x7fffffff);
if ( v == 0x7fffffff) v = 0;*/
v = v + d;
v = ((v & 0x80000000) >> 31) + (v & 0x7fffffff);
/*v = ((v & 0x80000000) >> 31) + (v & 0x7fffffff);
if ( v == 0x7fffffff) v = 0;*/
return (v);
};
ex_expr::exp_return_type ex_function_hash::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
Attributes *srcOp = getOperand(1);
ULng32 hashValue = 0;
if (srcOp->getNullFlag() && (! op_data[ -(2 * MAX_OPERANDS) + 1 ]))
{
// operand is a null value. All null values hash to
// the same hash value. Choose any arbitrary constant
// number as the hash value.
hashValue = ExHDPHash::nullHashValue; //;666654765;
}
else
{
// get the actual length stored in the data, or fixed length
Lng32 length = srcOp->getLength(op_data[-MAX_OPERANDS + 1]);
// if VARCHAR, skip trailing blanks and adjust length.
if (srcOp->getVCIndicatorLength() > 0) {
switch ( srcOp->getDatatype() ) {
// added to correctly handle VARNCHAR.
case REC_NCHAR_V_UNICODE:
{
// skip trailing blanks
NAWchar* wstr = (NAWchar*)(op_data[1]);
Lng32 wstr_length = length / sizeof(NAWchar);
while ((wstr_length > 0) &&
( wstr[wstr_length-1] == unicode_char_set::space_char())
)
wstr_length--;
length = sizeof(NAWchar)*wstr_length;
}
break;
default:
//case REC_BYTE_V_ASCII:
// skip trailing blanks
while ((length > 0) &&
(op_data[1][length-1] == ' '))
length--;
break;
}
}
UInt32 flags = ExHDPHash::NO_FLAGS;
switch(srcOp->getDatatype()) {
case REC_NCHAR_V_UNICODE:
case REC_NCHAR_V_ANSI_UNICODE:
flags = ExHDPHash::SWAP_TWO;
break;
}
hashValue = ExHDPHash::hash(op_data[1], flags, length);
};
*(ULng32 *)op_data[0] = hashValue;
return ex_expr::EXPR_OK;
};
Lng32 ex_function_hivehash::hashForCharType(char* data, Lng32 length)
{
// To compute: SUM (i from 0 to n-1) (s(i) * 31^(n-1-i)
ULng32 resultCopy = 0;
ULng32 result = (ULng32)data[0];
for (Lng32 i=1; i<length; i++ ) {
// perform result * 31, optimized as (result <<5 - result)
resultCopy = result;
result <<= 5;
result -= resultCopy;
result += (ULng32)(data[i]);
}
return result;
}
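// The polynomial above matches Java's String.hashCode() (result =
// s[0]*31^(n-1) + s[1]*31^(n-2) + ... + s[n-1]), which is presumably what
// Hive computes on the Java side; keeping the same recurrence keeps the
// two engines in agreement for single-byte characters.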
ex_expr::exp_return_type ex_function_hivehash::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
Attributes *srcOp = getOperand(1);
ULng32 hashValue = 0;
Lng32 length;
if (srcOp->getNullFlag() && (! op_data[ -(2 * MAX_OPERANDS) + 1 ]))
{
// operand is a null value. All null values hash to the same hash value.
hashValue = 0; // hive semantics: hash(NULL) = 0
} else
if ( (DFS2REC::isSQLVarChar(srcOp->getDatatype()) ||
DFS2REC::isANSIVarChar(srcOp->getDatatype())) &&
getOperand(1)->getVCIndicatorLength() > 0 )
{
length = srcOp->getLength(op_data[-MAX_OPERANDS + 1]);
hashValue = ex_function_hivehash::hashForCharType(op_data[1],length);
} else
if ( DFS2REC::isSQLFixedChar(srcOp->getDatatype()) ) {
length = srcOp->getLength();
hashValue = ex_function_hivehash::hashForCharType(op_data[1],length);
} else
if ( DFS2REC::isBinary(srcOp->getDatatype()) ) {
hashValue = *(ULng32*)(op_data[1]);
} // TBD: other SQ types
*(ULng32 *)op_data[0] = hashValue;
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ExHashComb
////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ExHashComb::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// always assume that both operands and result are of the same
// (unsigned) type and length
// with built-in long long type we could also support 8 byte integers
ULng32 op1, op2;
switch (getOperand(0)->getStorageLength())
{
case 4:
op1 = *((ULng32 *) op_data[1]);
op2 = *((ULng32 *) op_data[2]);
*((ULng32 *) op_data[0]) = ((op1 << 1) | (op1 >> 31)) ^ op2;
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ExHiveHashComb
////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ExHiveHashComb::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// always assume that both operands and result are of the same
// (unsigned) type and length
// with built-in long long type we could also support 8 byte integers
ULng32 op1, op2;
switch (getOperand(0)->getStorageLength())
{
case 4:
op1 = *((ULng32 *) op_data[1]);
op2 = *((ULng32 *) op_data[2]);
      // compute op1 * 31 + op2, optimized as (op1 << 5) - op1 + op2;
      // note the parentheses: << binds less tightly than + and -
      *((ULng32 *) op_data[0]) = (op1 << 5) - op1 + op2;
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
// -------------------------------------------------------------
// Hash Functions used by Hash Partitioning. These functions cannot
// change once Hash Partitioning is released! Defined for all data
// types, returns a 32 bit non-nullable hash value for the data item.
// The ::hash() function uses a loop over the key bytes; the other
// hash2()/hash4()/hash8() are more efficient but are only applicable
// to keys whose sizes are known at compile time: 2/4/8 bytes.
//--------------------------------------------------------------
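// Note on the SWAP_* flags: on little-endian platforms multi-byte integers
// are stored low byte first, so the flags direct hash() to consume those
// bytes in reversed (big-endian) order. This keeps the computed hash value
// identical across platforms, which matters because these values decide
// hash partitioning placement and cannot change once released.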
ULng32 ExHDPHash::hash(const char *data, UInt32 flags, Int32 length)
{
ULng32 hashValue = 0;
unsigned char *valp = (unsigned char *)data;
Int32 iter = 0; // iterator over the key bytes, if needed
switch(flags) {
case NO_FLAGS:
case SWAP_EIGHT:
{
// Speedup for long keys - compute first 8 bytes fast (the rest with a loop)
if ( length >= 8 ) {
hashValue = hash8(data, flags); // do the first 8 bytes fast
// continue with the 9-th byte (only when length > 8 )
valp = (unsigned char *)&data[8];
iter = 8;
}
for(; iter < length; iter++) {
// Make sure the hashValue is sensitive to the byte position.
// One bit circular shift.
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*valp++];
}
break;
}
case SWAP_TWO:
{
// Speedup for long keys - compute first 8 bytes fast (the rest with a loop)
if ( length >= 8 ) {
hashValue = hash8(data, flags); // do the first 8 bytes fast
// continue with the 9-th byte (only when length > 8 )
valp = (unsigned char *)&data[8];
iter = 8;
}
// Loop over all the bytes of the value and compute the hash value.
for(; iter < length; iter+=2) {
// Make sure the hashValue is sensitive to the byte position.
// One bit circular shift.
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+1)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*valp];
valp += 2;
}
break;
}
case SWAP_FOUR:
{
hashValue = hash4(data, flags);
break;
}
case (SWAP_FIRSTTWO | SWAP_LASTFOUR):
case SWAP_FIRSTTWO:
case SWAP_LASTFOUR:
{
if((flags & SWAP_FIRSTTWO) != 0) {
hashValue = randomHashValues[*(valp+1)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*valp];
valp += 2;
iter += 2;
}
if((flags & SWAP_LASTFOUR) != 0) {
length -= 4;
}
for(; iter < length; iter++) {
// Make sure the hashValue is sensitive to the byte position.
// One bit circular shift.
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*valp++];
}
if((flags & SWAP_LASTFOUR) != 0) {
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+3)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+2)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+1)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+0)];
}
break;
}
default:
assert(FALSE);
}
return hashValue;
}
ex_expr::exp_return_type ExHDPHash::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
Attributes *srcOp = getOperand(1);
ULng32 hashValue;
if (srcOp->getNullFlag() && (! op_data[ -(2 * MAX_OPERANDS) + 1 ]))
{
// operand is a null value. All null values hash to
// the same hash value. Choose any arbitrary constant
// number as the hash value.
//
hashValue = ExHDPHash::nullHashValue; //666654765;
}
else {
Int32 length = (Int32)srcOp->getLength(op_data[-MAX_OPERANDS + 1]);
// if VARCHAR, skip trailing blanks and adjust length.
if (srcOp->getVCIndicatorLength() > 0) {
switch ( srcOp->getDatatype() ) {
// added to correctly handle VARNCHAR.
case REC_NCHAR_V_UNICODE:
{
// skip trailing blanks
NAWchar* wstr = (NAWchar*)(op_data[1]);
Int32 wstr_length = length / sizeof(NAWchar);
while ((wstr_length > 0) &&
( wstr[wstr_length-1] == unicode_char_set::space_char()))
wstr_length--;
length = sizeof(NAWchar) * wstr_length;
}
break;
default:
// skip trailing blanks
while ((length > 0) &&
(op_data[1][length-1] == ' '))
length--;
break;
}
}
UInt32 flags = NO_FLAGS;
switch(srcOp->getDatatype()) {
case REC_NUM_BIG_UNSIGNED:
case REC_NUM_BIG_SIGNED:
case REC_BIN16_SIGNED:
case REC_BIN16_UNSIGNED:
case REC_NCHAR_F_UNICODE:
case REC_NCHAR_V_UNICODE:
case REC_NCHAR_V_ANSI_UNICODE:
flags = SWAP_TWO;
break;
case REC_BIN32_SIGNED:
case REC_BIN32_UNSIGNED:
case REC_IEEE_FLOAT32:
flags = SWAP_FOUR;
break;
case REC_BIN64_SIGNED:
case REC_BIN64_UNSIGNED:
case REC_IEEE_FLOAT64:
flags = SWAP_EIGHT;
break;
case REC_DATETIME:
{
rec_datetime_field start;
rec_datetime_field end;
ExpDatetime *datetime = (ExpDatetime*) srcOp;
datetime->getDatetimeFields(srcOp->getPrecision(), start, end);
if(start == REC_DATE_YEAR) {
flags = SWAP_FIRSTTWO;
}
if(end == REC_DATE_SECOND && srcOp->getScale() > 0) {
flags |= SWAP_LASTFOUR;
}
}
break;
default:
if(srcOp->getDatatype() >= REC_MIN_INTERVAL &&
srcOp->getDatatype() <= REC_MAX_INTERVAL) {
if (srcOp->getLength() == 8)
flags = SWAP_EIGHT;
else if (srcOp->getLength() == 4)
flags = SWAP_FOUR;
else if (srcOp->getLength() == 2)
flags = SWAP_TWO;
else
assert(FALSE);
}
}
hashValue = hash(op_data[1], flags, length);
}
*(ULng32 *)op_data[0] = hashValue;
return ex_expr::EXPR_OK;
} // ExHDPHash::eval()
// --------------------------------------------------------------
// This function is used to combine two hash values to produce a new
// hash value. Used by Hash Partitioning. This function cannot change
// once Hash Partitioning is released! Defined for all data types,
// returns a 32 bit non-nullable hash value for the data item.
// --------------------------------------------------------------
ex_expr::exp_return_type ExHDPHashComb::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// always assume that both operands and result are of the same
// (unsigned) type and length
assert(getOperand(0)->getStorageLength() == 4 &&
getOperand(1)->getStorageLength() == 4 &&
getOperand(2)->getStorageLength() == 4);
ULng32 op1, op2;
op1 = *((ULng32 *) op_data[1]);
op2 = *((ULng32 *) op_data[2]);
// One bit, circular shift
op1 = ((op1 << 1) | (op1 >> 31));
op1 = op1 ^ op2;
*((ULng32 *) op_data[0]) = op1;
return ex_expr::EXPR_OK;
} // ExHDPHashComb::eval()
// ex_function_replace_null
//
ex_expr::exp_return_type
ex_function_replace_null::processNulls(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_replace_null::eval(char *op_data[],
CollHeap*,
ComDiagsArea **) {
Attributes *tgt = getOperand(0);
// Mark the result as non-null
if(tgt->getNullFlag())
ExpTupleDesc::clearNullValue(op_data[ -(2 * MAX_OPERANDS) ],
tgt->getNullBitIndex(),
tgt->getTupleFormat());
// If the input is NULL, replace it with the value in op_data[3]
if (! op_data[ - (2 * MAX_OPERANDS) + 1]) {
for(Lng32 i=0; i < tgt->getStorageLength(); i++)
op_data[0][i] = op_data[3][i];
}
else {
for(Lng32 i=0; i < tgt->getStorageLength(); i++)
op_data[0][i] = op_data[2][i];
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ex_function_mod
////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ex_function_mod::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int32 lenr = (Int32) getOperand(0)->getLength();
Int32 len1 = (Int32) getOperand(1)->getLength();
Int32 len2 = (Int32) getOperand(2)->getLength();
Int64 op1, op2, result;
switch (len1)
{
case 1:
op1 = *((Int8 *) op_data[1]);
break;
case 2:
op1 = *((short *) op_data[1]);
break;
case 4:
op1 = *((Lng32 *) op_data[1]);
break;
case 8:
op1 = *((Int64 *) op_data[1]);
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
switch (len2)
{
case 1:
op2 = *((Int8 *) op_data[2]);
break;
case 2:
op2 = *((short *) op_data[2]);
break;
case 4:
op2 = *((Lng32 *) op_data[2]);
break;
case 8:
op2 = *((Int64 *) op_data[2]);
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
if (op2 == 0)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_DIVISION_BY_ZERO,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
result = op1 % op2;
switch (lenr)
{
case 1:
    *((Int8 *) op_data[0]) = (Int8) result;
break;
case 2:
*((short *) op_data[0]) = (short) result;
break;
case 4:
*((Lng32 *) op_data[0]) = (Lng32)result;
break;
case 8:
*((Int64 *) op_data[0]) = result;
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ex_function_mask
////////////////////////////////////////////////////////////////////
// LCOV_EXCL_START
ex_expr::exp_return_type ex_function_mask::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// always assume that both operands and result are of the same
// (unsigned) type and length
  // (8-byte operands are handled via the Int64 case below)
ULng32 op1, op2, result;
switch (getOperand(0)->getStorageLength())
{
case 1:
op1 = *((UInt8 *) op_data[1]);
op2 = *((UInt8 *) op_data[2]);
if(getOperType() == ITM_MASK_SET) {
result = op1 | op2;
} else {
result = op1 & ~op2;
}
    *((UInt8 *) op_data[0]) = (UInt8) result;
break;
case 2:
op1 = *((unsigned short *) op_data[1]);
op2 = *((unsigned short *) op_data[2]);
if(getOperType() == ITM_MASK_SET) {
result = op1 | op2;
} else {
result = op1 & ~op2;
}
*((unsigned short *) op_data[0]) = (unsigned short) result;
break;
case 4:
op1 = *((ULng32 *) op_data[1]);
op2 = *((ULng32 *) op_data[2]);
if(getOperType() == ITM_MASK_SET) {
result = op1 | op2;
} else {
result = op1 & ~op2;
}
*((ULng32 *) op_data[0]) = result;
break;
case 8:
{
Int64 lop1 = *((Int64 *) op_data[1]);
Int64 lop2 = *((Int64 *) op_data[2]);
Int64 lresult;
if(getOperType() == ITM_MASK_SET) {
lresult = lop1 | lop2;
} else {
lresult = lop1 & ~lop2;
}
*((Int64 *) op_data[0]) = lresult;
break;
}
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ExFunctionShift
////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ExFunctionShift::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
if(getOperand(2)->getStorageLength() != 4) {
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
ULng32 shift = *((ULng32 *)op_data[2]);
ULng32 value, result;
switch (getOperand(0)->getStorageLength()) {
case 1:
value = *((UInt8 *) op_data[1]);
if(getOperType() == ITM_SHIFT_RIGHT) {
result = value >> shift;
} else {
result = value << shift;
}
*((UInt8 *) op_data[0]) = (UInt8) result;
break;
case 2:
value = *((unsigned short *) op_data[1]);
if(getOperType() == ITM_SHIFT_RIGHT) {
result = value >> shift;
} else {
result = value << shift;
}
*((unsigned short *) op_data[0]) = (unsigned short) result;
break;
case 4:
value = *((ULng32 *) op_data[1]);
if(getOperType() == ITM_SHIFT_RIGHT) {
result = value >> shift;
} else {
result = value << shift;
}
*((ULng32 *) op_data[0]) = result;
break;
case 8:
{
Int64 value = *((Int64 *) op_data[1]);
Int64 result;
if(getOperType() == ITM_SHIFT_RIGHT) {
result = value >> shift;
} else {
result = value << shift;
}
*((Int64 *) op_data[0]) = result;
break;
}
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
// LCOV_EXCL_STOP
NA_EIDPROC static
ex_expr::exp_return_type getDoubleValue(double *dest,
char *source,
Attributes *operand,
CollHeap *heap,
ComDiagsArea** diagsArea)
{
switch(operand->getDatatype()) {
case REC_FLOAT64:
*dest = *(double *)(source);
return ex_expr::EXPR_OK;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
}
NA_EIDPROC static
ex_expr::exp_return_type setDoubleValue(char *dest,
Attributes *operand,
double *source,
CollHeap *heap,
ComDiagsArea** diagsArea)
{
switch(operand->getDatatype()) {
case REC_FLOAT64:
*(double *)dest = *source;
return ex_expr::EXPR_OK;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
}
ex_expr::exp_return_type ExFunctionSVariance::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
double sumOfValSquared;
double sumOfVal;
double countOfVal;
double avgOfVal;
double result;
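  // Sample variance over n values: s^2 = (sum(x^2) - n*avg^2) / (n - 1),
  // computed here as (sumOfValSquared - sumOfVal*avgOfVal) / (countOfVal - 1)
  // and clamped at 0.0 to absorb floating-point rounding below zero.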
if(getDoubleValue(&sumOfValSquared, op_data[1], getOperand(1),
heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
if(getDoubleValue(&sumOfVal, op_data[2], getOperand(2), heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
if(getDoubleValue(&countOfVal, op_data[3], getOperand(3), heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
avgOfVal = sumOfVal/countOfVal;
if(countOfVal == 1) {
result = 0.0;
}
else {
result = (sumOfValSquared - (sumOfVal * avgOfVal)) / (countOfVal - 1);
if(result < 0.0) {
result = 0.0;
}
}
if(setDoubleValue(op_data[0], getOperand(0), &result, heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionSStddev::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
double sumOfValSquared;
double sumOfVal;
double countOfVal;
double avgOfVal;
double result;
if(getDoubleValue(&sumOfValSquared, op_data[1], getOperand(1),
heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
if(getDoubleValue(&sumOfVal, op_data[2], getOperand(2), heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
if(getDoubleValue(&countOfVal, op_data[3], getOperand(3), heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
avgOfVal = sumOfVal/countOfVal;
if(countOfVal == 1) {
result = 0.0;
}
else {
short err = 0;
result = (sumOfValSquared - (sumOfVal * avgOfVal)) / (countOfVal - 1);
if(result < 0.0) {
result = 0.0;
} else {
result = MathSqrt(result, err);
}
if (err)
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("SQRT");
ExRaiseSqlError(heap, diagsArea, EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0("STDDEV");
return ex_expr::EXPR_ERROR;
}
}
if(setDoubleValue(op_data[0], getOperand(0), &result, heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExpRaiseErrorFunction::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
char catName[ComAnsiNamePart::MAX_IDENTIFIER_EXT_LEN+1];
char schemaName[ComAnsiNamePart::MAX_IDENTIFIER_EXT_LEN+1];
// Don't do anything with the op[] data
// Create a DiagsArea to return the SQLCODE and the ConstraintName
// and TableName.
if (raiseError())
ExRaiseSqlError(heap, diagsArea, (ExeErrorCode)getSQLCODE());
else
ExRaiseSqlWarning(heap, diagsArea, (ExeErrorCode)getSQLCODE());
  // SQLCODE corresponding to Triggered Action Exception
if (getSQLCODE() == ComDiags_TrigActionExceptionSQLCODE)
{
assert(constraintName_ && tableName_);
extractCatSchemaNames(catName, schemaName, constraintName_);
*(*diagsArea) << DgTriggerCatalog(catName);
*(*diagsArea) << DgTriggerSchema(schemaName);
*(*diagsArea) << DgTriggerName(constraintName_);
extractCatSchemaNames(catName, schemaName, tableName_);
*(*diagsArea) << DgCatalogName(catName);
*(*diagsArea) << DgSchemaName(schemaName);
*(*diagsArea) << DgTableName(tableName_);
}
else if (getSQLCODE() == ComDiags_SignalSQLCODE) // Signal Statement
{
if (constraintName_)
*(*diagsArea) << DgString0(constraintName_); // The SQLSTATE
if (getNumOperands()==2)
{
#pragma nowarn(1506) // warning elimination
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
#pragma warn(1506) // warning elimination
op_data[1][len1] = '\0';
*(*diagsArea) << DgString1(op_data[1]); // The string expression
}
else
if (tableName_)
*(*diagsArea) << DgString1(tableName_); // The message
}
else
{
if (constraintName_)
{
extractCatSchemaNames(catName, schemaName, constraintName_);
*(*diagsArea) << DgConstraintCatalog(catName);
*(*diagsArea) << DgConstraintSchema(schemaName);
*(*diagsArea) << DgConstraintName(constraintName_);
}
if (tableName_)
{
extractCatSchemaNames(catName, schemaName, tableName_);
*(*diagsArea) << DgCatalogName(catName);
*(*diagsArea) << DgSchemaName(schemaName);
*(*diagsArea) << DgTableName(tableName_);
}
}
// If it's a warning, we should return a predictable boolean value.
*((ULng32*)op_data[0]) = 0;
if (raiseError())
return ex_expr::EXPR_ERROR;
else
return ex_expr::EXPR_OK;
}
// -----------------------------------------------------------------------
// methods for ExFunctionPack
// -----------------------------------------------------------------------
// LCOV_EXCL_START
// Constructor.
ExFunctionPack::ExFunctionPack(Attributes** attr,
Space* space,
Lng32 width,
Lng32 base,
NABoolean nullsPresent)
: ex_function_clause(ITM_PACK_FUNC,3,attr,space),
width_(width), base_(base)
{
setNullsPresent(nullsPresent);
}
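// Layout of the packed record, as used by eval() below:
//   bytes [0, 4)       : Lng32 count of rows packed so far
//   bytes [4, base_)   : optional null bitmap, one bit per row
//   bytes [base_, ...) : data region; each packet is width_ bytes
//                        (or -width_ bits when width_ is negative)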
// Evaluator.
ex_expr::exp_return_type ExFunctionPack::eval(char* op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
char guard1 = op_data[0][-1];
char guard2 = op_data[0][getOperand(0)->getLength()];
// Extract no of rows already in the packed record.
Lng32 noOfRows;
str_cpy_all((char*)&noOfRows,op_data[0],sizeof(Lng32));
// Extract the packing factor.
Lng32 pf = *(Lng32 *)op_data[2];
// The clause returns an error for no more slots in the packed record.
if(noOfRows >= pf)
{
ExRaiseSqlError(heap,diagsArea,EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
// Whether the source is null.
char* nullFlag = op_data[-2*ex_clause::MAX_OPERANDS+1];
// If null bit map is present in the packed record.
if(nullsPresent())
{
// Offset of null bit from the beginning of the null bitmap.
Lng32 nullBitOffsetInBytes = noOfRows >> 3;
// Offset of null bit from the beginning of the byte it is in.
Lng32 nullBitOffsetInBits = noOfRows & 0x7;
// Extract the byte in which the null bit is in.
char* nullByte = op_data[0] + nullBitOffsetInBytes + sizeof(Int32);
// Used to set/unset the null bit.
#pragma nowarn(1506) // warning elimination
unsigned char nullByteMask = (1 << nullBitOffsetInBits);
#pragma warn(1506) // warning elimination
// Turn bit off/on depending on whether operand is null.
if(nullFlag == 0)
*nullByte |= nullByteMask; // set null bit on.
else
*nullByte &= (~nullByteMask); // set null bit off.
}
else if(nullFlag == 0)
{
// Bit map is not present but input is null. We got a problem.
ExRaiseSqlError(heap,diagsArea,EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
// We have contents to copy only if source is not null.
if(nullFlag != 0)
{
// Width of each packet in the packed record. -ve means in no of bits.
if(width_ < 0)
{
Lng32 widthInBits = -width_;
// Length of data region which has already been occupied in bits.
Lng32 tgtBitsOccupied = (noOfRows * widthInBits);
// Byte offset for data of this packet from beginning of data region.
Lng32 tgtByteOffset = base_ + (tgtBitsOccupied >> 3);
// Bit offset for data of this packet from beginning of its byte.
Lng32 tgtBitOffset = (tgtBitsOccupied & 0x7);
// Byte offset of data source left to be copied.
Lng32 srcByteOffset = 0;
// Bit offset of data source from beginning of its byte to be copied.
Lng32 srcBitOffset = 0;
// No of bits to copy in total.
Lng32 bitsToCopy = widthInBits;
// There are still bits remaining to be copied.
while(bitsToCopy > 0)
{
// Pointer to the target byte.
char* tgtBytePtr = (op_data[0] + tgtByteOffset);
// No of bits left in the target byte.
Lng32 bitsLeftInTgtByte = 8 - tgtBitOffset;
// No of bits left in the source byte.
Lng32 bitsLeftInSrcByte = 8 - srcBitOffset;
Lng32 bitsToCopyThisRound = (bitsLeftInTgtByte > bitsLeftInSrcByte ?
bitsLeftInSrcByte : bitsLeftInTgtByte);
if(bitsToCopyThisRound > bitsToCopy) bitsToCopyThisRound = bitsToCopy;
// Mask has ones in the those positions where bits will be copied to.
#pragma nowarn(1506) // warning elimination
unsigned char mask = ((0xFF >> tgtBitOffset) <<
(8 - bitsToCopyThisRound)) >>
(8 - tgtBitOffset - bitsToCopyThisRound);
#pragma warn(1506) // warning elimination
// Clear target bits. Keep other bits unchanged in the target byte.
(*tgtBytePtr) &= (~mask);
        // Align source bits with the destination. Mask off other bits.
unsigned char srcByte = *(op_data[1] + srcByteOffset);
#pragma nowarn(1506) // warning elimination
srcByte = ((srcByte >> srcBitOffset) << tgtBitOffset) & mask;
#pragma warn(1506) // warning elimination
// Make the copy.
(*tgtBytePtr) |= srcByte;
// Move source byte and bit offsets.
srcBitOffset += bitsToCopyThisRound;
if(srcBitOffset >= 8)
{
srcByteOffset++;
srcBitOffset -= 8;
}
// Move target byte and bit offsets.
tgtBitOffset += bitsToCopyThisRound;
if(tgtBitOffset >= 8)
{
tgtByteOffset++;
tgtBitOffset -= 8;
}
bitsToCopy -= bitsToCopyThisRound;
}
}
else // width_ > 0
{
// Width in bytes: we can copy full strings of bytes.
Lng32 tgtByteOffset = base_ + (noOfRows * width_);
str_cpy_all(op_data[0]+tgtByteOffset,op_data[1],width_);
}
}
// Update the "noOfRows" in the packed record.
noOfRows++;
str_cpy_all(op_data[0],(char*)&noOfRows,sizeof(Lng32));
// $$$ supported as a CHAR rather than a VARCHAR for now.
// getOperand(0)->
// setVarLength(offset+lengthToCopy,op_data[-ex_clause::MAX_OPERANDS]);
if(guard1 != op_data[0][-1] ||
guard2 != op_data[0][getOperand(0)->getLength()]) {
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
// Signal a completely packed record to the caller.
if(noOfRows == pf) return ex_expr::EXPR_TRUE;
// Signal an incompletely packed record to the caller.
return ex_expr::EXPR_FALSE;
}
// ExUnPackCol::eval() ------------------------------------------
// The ExUnPackCol clause extracts a set of bits from a CHAR value.
// The set of bits to extract is described by a base offset, a width,
// and an index. The offset and width are known at compile time, but
// the index is a run time variable. ExUnPackCol clause also gets
// the null indicator of the result from a bitmap within the CHAR
// field.
//
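// For example, with base = 4 and width = 2 (bits), index = 5 selects the
// 2-bit item at bit offset 10 of the data region: byte 4 + 1, bits 2-3 of
// that byte (items are packed least-significant-bits first).
//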
ex_expr::exp_return_type
ExUnPackCol::eval(char *op_data[], CollHeap *heap, ComDiagsArea **diagsArea)
{
// The width of the extract in BITS.
//
Lng32 width = width_;
// The base offset of the data in BYTES.
//
Lng32 base = base_;
// Boolean indicating if the NULL Bitmap is present.
// If it is present, then it starts at a 4 (sizeof(int)) byte offset.
//
NABoolean np = nullsPresent();
// Which piece of data are we extracting.
//
Lng32 index = *(Lng32 *)op_data[2];
// NULL Processing...
//
if(np) {
// The bit to be extracted.
//
Lng32 bitOffset = index;
// The byte of the CHAR field containing the bit.
//
#pragma nowarn(1506) // warning elimination
Lng32 byteOffset = sizeof(Int32) + (bitOffset >> 3);
#pragma warn(1506) // warning elimination
// The bit of the byte at byteOffset to be extracted.
//
bitOffset = bitOffset & 0x7;
// A pointer to the null indicators of the operands.
//
char **null_data = &op_data[-2 * ex_clause::MAX_OPERANDS];
// The mask used to test the NULL bit.
//
UInt32 mask = 1 << bitOffset;
// The byte containing the NULL Flag.
//
UInt32 byte = op_data[1][byteOffset];
// Is the NULL Bit set?
//
if(byte & mask) {
// The value is NULL, so set the result to NULL, and
// return since we do not need to extract the data.
//
*(short *)null_data[0] = (short)0xFFFF;
return ex_expr::EXPR_OK;
} else {
// The value is non-NULL, so set the indicator,
// continue to extract the data value.
//
*(short *)null_data[0] = 0;
}
}
// Bytes masks used for widths (1-8) of bit extracts.
//
const UInt32 masks[] = {0,1,3,7,15,31,63,127,255};
// Handle some special cases:
// Otherwise do a generic bit extract.
//
if(width == 8 || width == 4 || width == 2 || width == 1) {
// Items per byte for special case widths (1-8).
//
const UInt32 itemsPerBytes[] = {0,8,4,2,2,1,1,1,1};
// Amount to shift the index to get a byte index for the
// special case widths.
//
const UInt32 itemsPerByteShift[] = {0,3,2,1,1,0,0,0,0};
// Extracted value.
//
UInt32 value;
// An even more special case.
//
if(width == 8) {
// Must use unsigned assignment so that sign extension is not done.
      // Later, when signed bit-precision integers are supported, we will
      // have to add a special case for those.
//
value = (unsigned char)op_data[1][base + index];
} else {
// The number of items in a byte.
//
UInt32 itemsPerByte = itemsPerBytes[width];
// The amount to shift the index to get a byte offset.
//
UInt32 shift = itemsPerByteShift[width];
// The offset of the byte containing the value.
//
Lng32 byteIndex = index >> shift;
// The index into the byte of the value.
//
#pragma nowarn(1506) // warning elimination
Lng32 itemIndex = index & ( itemsPerByte - 1);
#pragma warn(1506) // warning elimination
// A mask to extract an item of size width.
//
UInt32 mask = masks[width];
// The byte containing the item.
//
value = op_data[1][base + byteIndex];
// Shift the byte, so that the value to be
// extracted is in the least significant bits.
//
value = value >> (width * itemIndex);
// Clear all bits except those of the value.
//
value = value & mask;
}
// Copy value to result.
//
switch(getOperand(0)->getLength()) {
case 1:
#pragma nowarn(1506) // warning elimination
*(unsigned char *)op_data[0] = value;
#pragma warn(1506) // warning elimination
return ex_expr::EXPR_OK;
case 2:
#pragma nowarn(1506) // warning elimination
*(unsigned short *)op_data[0] = value;
#pragma warn(1506) // warning elimination
return ex_expr::EXPR_OK;
case 4:
*(ULng32 *)op_data[0] = value;
return ex_expr::EXPR_OK;
default:
// ERROR - This should never happen.
//
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
#pragma nowarn(203) // warning elimination
return ex_expr::EXPR_OK;
#pragma warn(203) // warning elimination
}
// Handle special case of a Byte copy.
//
if((width % 8) == 0) {
width = width/8;
str_cpy_all(op_data[0], &op_data[1][base + (index * width)], width);
return ex_expr::EXPR_OK;
}
char guard1 = op_data[0][-1];
char guard2 = op_data[0][getOperand(0)->getLength()];
// The general case of arbitrary bit lengths that can span byte boundaries.
//
// The offset to the value in bits.
//
Lng32 bitOffset = index * width;
// The offset to the last bit of the value in bits.
//
Lng32 bitOffsetEnd = bitOffset + width - 1;
// The offset to the byte containing the first bit of the value.
// in bytes.
//
Lng32 byteOffset = base + (bitOffset >> 3);
// The offset to the byte containing the first bit beyond the value.
// in bytes.
//
Lng32 byteOffsetEnd = base + (bitOffsetEnd >> 3);
// The offset of the first bit in the byte.
//
bitOffset = bitOffset & 0x7;
// The amount to shift the byte to the right to align
// the lower portion.
//
Lng32 rshift = bitOffset;
// The amount to shift the byte to the left to align
// the upper portion.
//
Lng32 lshift = 8 - bitOffset;
// An index into the destination.
//
Lng32 dindex = 0;
// Copy all the bits to the destination.
//
Int32 i = byteOffset;
for(; i <= byteOffsetEnd; i++) {
// Get a byte containing bits of the value.
//
unsigned char byte = op_data[1][i];
if(dindex > 0) {
// After the first byte, must copy the upper
// portion of the byte to the previous byte of
// the result. This is the second time writing
// to this byte.
//
op_data[0][dindex - 1] |= byte << lshift;
}
if(dindex < (Lng32) getOperand(0)->getLength()) {
// Copy the lower portion of this byte of the result
// to the destination. This is the first time this
// byte is written to.
//
#pragma nowarn(1506) // warning elimination
op_data[0][dindex] = byte >> rshift;
#pragma warn(1506) // warning elimination
}
dindex++;
}
// Clear all bits of the result that did not come
// from the extracted value.
//
for(i = 0; i < (Lng32) getOperand(0)->getLength(); i++) {
#pragma nowarn(1506) // warning elimination
unsigned char mask = (width > 7) ? 0xFF : masks[width];
#pragma warn(1506) // warning elimination
op_data[0][i] &= mask;
width -= 8;
width = (width < 0) ? 0 : width;
}
if(guard1 != op_data[0][-1] ||
guard2 != op_data[0][getOperand(0)->getLength()]) {
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
// LCOV_EXCL_STOP
ex_expr::exp_return_type ex_function_translate::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
Int32 copyLen = 0;
Int32 convertedLen = 0;
Int32 convType = get_conv_type();
Attributes * op0 = getOperand(0);
Attributes * op1 = getOperand(1);
#pragma nowarn(1506) // warning elimination
return convDoIt(op_data[1],
op1->getLength(op_data[-MAX_OPERANDS + 1]),
op1->getDatatype(),
op1->getPrecision(),
(convType == CONV_UTF8_F_UCS2_V) ? (Int32)(CharInfo::UTF8) : op1->getScale(),
op_data[0],
op0->getLength(),
op0->getDatatype(),
op0->getPrecision(),
(convType == CONV_UCS2_F_UTF8_V) ? (Int32)(CharInfo::UTF8) : op0->getScale(),
op_data[-MAX_OPERANDS],
op0->getVCIndicatorLength(),
heap,
diagsArea,
(ConvInstruction)convType);
#pragma warn(1506) // warning elimination
}
void ExFunctionRandomNum::initSeed(char *op_data[])
{
if (seed_==0)
{
if (simpleRandom())
{
// start with 1 and go up to max
seed_ = 1;
return;
}
if (getNumOperands() == 2)
{
// seed is specified as an argument. Use it.
#pragma nowarn(1506) // warning elimination
seed_ = *(ULng32 *)op_data[1];
#pragma warn(1506) // warning elimination
return;
}
// Pick an initial seed. According to the reference given below
// (in the eval function), all initial seeds between 1 and
// 2147483646 are equally valid. So, we just need to pick one
// in this range. Do this based on a timestamp.
// Use ex_function_current to get timestamp.
//
char currBuff[32];
char *opData[1];
opData[0] = currBuff;
ex_function_current currentFun;
currentFun.eval(&opData[0], 0, 0);
// Extract year, month, etc.
//
char *p = currBuff;
short year = *((short*) p);
p += sizeof(short);
char month = *p++;
char day = *p++;
char hour = *p++;
char minute = *p++;
char second = *p++;
Lng32 fraction = *((Lng32*) p);
// Local variables year, ..., fraction are now initialized.
// From the values of these variables, generate a seed in the
// desired range.
Lng32 x = year * month * day;
if (hour) x *= hour;
p = (char*) &x;
assert(sizeof(Lng32)==4);
p[0] |= (second<<1);
p[1] |= (minute<<1);
p[2] |= (minute<<2);
p[3] |= second;
seed_ = x + fraction;
if (seed_<0)
seed_ += 2147483647;
if ( seed_ < 1 ) seed_ = 1;
}
}
NA_EIDPROC void ExFunctionRandomNum::genRand(char *op_data[])
{
// Initialize seed if not already done
initSeed(op_data);
Lng32 t = 0;
const Lng32 M = 2147483647;
if (simpleRandom())
{
t = seed_ + 1;
}
else
{
// Algorithm is taken from "Random Number Generators: Good Ones
// Are Hard To Find", by Stephen K. Park and Keith W. Miller,
// Communications of the ACM, Volume 31, Number 10, Oct 1988.
const Lng32 A = 16807;
const Lng32 Q = 127773;
const Lng32 R = 2836;
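    // Schrage's method: with Q = M/A and R = M%A, computing A*(seed%Q) -
    // R*(seed/Q) evaluates A*seed (mod M) without overflowing 32-bit
    // signed arithmetic; a negative intermediate result is corrected by
    // adding M below.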
Lng32 h = seed_/Q;
Lng32 l = seed_%Q;
t = A*l-R*h;
}
if (t>0)
seed_ = t;
else
seed_ = t + M;
}
ex_expr::exp_return_type ExFunctionRandomNum::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
genRand(op_data); // generates and sets the random number in seed_
*((ULng32*)op_data[0]) = (ULng32) seed_;
return ex_expr::EXPR_OK;
}
void ExFunctionRandomSelection::initDiff()
{
if (difference_ == -1)
{
difference_ = 0;
while (selProbability_ >= 1.0)
{
difference_++;
selProbability_ -= 1.0;
}
// Normalize the selProbability to a 32 bit integer and store in
// normProbability
normProbability_ = (Lng32) (selProbability_ * 0x7fffffff);
// reset the selProbability_ to original value in case this function
// gets called again
#pragma nowarn(1506) // warning elimination
selProbability_ += difference_;
#pragma warn(1506) // warning elimination
}
}
ex_expr::exp_return_type ExFunctionRandomSelection::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
initDiff(); // gets executed only once
genRand(NULL); // generates and sets the random number in seed_
if (getRand() < normProbability_)
*((ULng32*)op_data[0]) = (ULng32) (difference_ + 1);
else
*((ULng32*)op_data[0]) = (ULng32) (difference_);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExHash2Distrib::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
ULng32 keyValue = *(ULng32*)op_data[1];
ULng32 numParts = *(ULng32*)op_data[2];
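// Multiplicative partitioning: treating keyValue as the fraction
// keyValue / 2^32 in [0,1), the 64-bit product below computes
// floor(keyValue * numParts / 2^32), i.e. a partition number in
// [0, numParts) with near-equal-width buckets.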
ULng32 partNo =
(ULng32)(((Int64)keyValue * (Int64)numParts) >> 32);
*(ULng32*)op_data[0] = partNo;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExProgDistrib::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
ULng32 keyValue = *(Lng32*)op_data[1];
ULng32 totNumValues = *(Lng32*) op_data[2];
ULng32 resultValue = 1;
ULng32 offset = keyValue;
ULng32 i = 2;
while(offset >= i && i <= totNumValues) {
#pragma nowarn(1506) // warning elimination
Lng32 n1 = offset % i;
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
Lng32 n2 = offset / i;
#pragma warn(1506) // warning elimination
if (n1 == 0) {
offset = (i-1) * (n2 - 1) + resultValue;
resultValue = i;
i++;
} else {
Lng32 n3 = n2 << 1;
if(n1 > n3) {
Lng32 n = n1/n3 + (n1%n3 != 0);
offset -= n2 * n;
i += n;
} else {
offset -= n2;
i++;
}
}
}
*((ULng32 *)op_data[0]) = resultValue - 1;
return ex_expr::EXPR_OK;
}
// LCOV_EXCL_START
ex_expr::exp_return_type ExProgDistribKey::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
ULng32 value = *(ULng32*)op_data[1];
ULng32 offset = *(ULng32*)op_data[2];
ULng32 totNumValues = *(ULng32*)op_data[3];
ULng32 uniqueVal = offset >> 16;
offset = offset & 0x0000FFFF;
value++;
ULng32 i = totNumValues;
while(i >= 2) {
if (value==i) {
value = (ULng32) (offset-1)%(i-1) + 1;
offset = ((offset-1)/(i-1) + 1) * i;
i--;
} else if(offset < i) {
i = (offset>value?offset:value);
} else {
offset = offset + (offset-1)/(i-1);
i--;
}
}
Int64 result = offset;
result = ((result << 16) | uniqueVal) << 16;
*((Int64 *)op_data[0]) = result;
return ex_expr::EXPR_OK;
}
// LCOV_EXCL_STOP
ex_expr::exp_return_type ExPAGroup::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
ULng32 partNum = *(ULng32*)op_data[1];
ULng32 totNumGroups = *(ULng32*) op_data[2];
ULng32 totNumParts = *(ULng32*) op_data[3];
ULng32 scaleFactor = totNumParts / totNumGroups;
ULng32 transPoint = (totNumParts % totNumGroups);
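// Deal partitions to groups as evenly as possible: the first
// transPoint groups cover (scaleFactor + 1) partitions each, the
// remaining groups cover scaleFactor partitions each.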
ULng32 groupPart;
if(partNum < (transPoint * (scaleFactor + 1))) {
groupPart = partNum / (scaleFactor + 1);
} else {
groupPart = (partNum - transPoint) / scaleFactor;
}
*((ULng32 *)op_data[0]) = groupPart;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionRangeLookup::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
// Two operands get passed to ExFunctionRangeLookup: a pointer to
// the actual, encoded key, and a pointer into a constant array
// that contains the encoded split ranges. The result is a 4 byte
// integer, not NULL, that contains the partition number.
char *encodedKey = op_data[1];
char *sKeys = op_data[2];
Lng32 *result = (Lng32 *) op_data[0];
// Now perform a binary search in sKeys
Lng32 lo = 0;
Lng32 hi = numParts_; // note we have one more entry than parts
Lng32 probe;
Lng32 cresult;
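// Binary search invariant (assuming the key lies within the overall
// range spanned by sKeys): the target partition number stays in
// [lo, hi), so once hi - lo <= 1 the answer is lo.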
while (hi-lo > 1)
{
// try the element in the middle (may round down)
probe = (lo+hi)/2;
// compare our encoded key with that middle split range
cresult = str_cmp(encodedKey,
&sKeys[probe*partKeyLen_],
partKeyLen_);
if (cresult <= 0)
hi = probe; // search first half, discard second half
if (cresult >= 0)
lo = probe; // search second half, discard first half
}
// Once we have narrowed it down to a difference between lo and hi
// of 0 or 1, we know that lo points to the index of our partition
// because the partition number must be greater or equal to lo and
// less than hi. Remember that we set hi to one more than we had
// partition numbers.
*result = lo;
return ex_expr::EXPR_OK;
}
ExRowsetArrayScan::ExRowsetArrayScan(){};
ExRowsetArrayRowid::ExRowsetArrayRowid(){};
ExRowsetArrayInto::ExRowsetArrayInto(){};
ExRowsetArrayScan::ExRowsetArrayScan(Attributes **attr,
Space *space,
Lng32 maxNumElem,
Lng32 elemSize,
NABoolean elemNullInd)
: maxNumElem_(maxNumElem),
elemSize_(elemSize),
elemNullInd_(elemNullInd),
ex_function_clause(ITM_ROWSETARRAY_SCAN, 3, attr, space)
{
};
ExRowsetArrayRowid::ExRowsetArrayRowid(Attributes **attr,
Space *space,
Lng32 maxNumElem)
: maxNumElem_(maxNumElem),
ex_function_clause(ITM_ROWSETARRAY_ROWID, 3, attr, space)
{
};
ExRowsetArrayInto::ExRowsetArrayInto(Attributes **attr,
Space *space,
Lng32 maxNumElem,
Lng32 elemSize,
NABoolean elemNullInd)
: maxNumElem_(maxNumElem),
numElem_(0),
elemSize_(elemSize),
elemNullInd_(elemNullInd),
ex_function_clause(ITM_ROWSETARRAY_INTO, 3, attr, space)
{
};
// ExRowsetArrayScan::eval() ------------------------------------------
// The ExRowsetArrayScan clause extracts an element of the Rowset array
// The size of the element is known at compile time, but the index is a
// run time variable.
ex_expr::exp_return_type
ExRowsetArrayScan::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to the result
// op_data[1] points to the array
// op_data[2] points to the index
Lng32 index = *(Lng32 *)op_data[2];
if (index < 0 || index >= maxNumElem_)
{
// The index cannot be greater than the dimension of the array
// It is likely that there was an item expression evaluated at
// execution time to obtain the rowsetSize which is greater than
// the maximum allowed.
ExRaiseSqlError(heap, diagsArea, EXE_ROWSET_INDEX_OUTOF_RANGE);
**diagsArea << DgSqlCode(-EXE_ROWSET_INDEX_OUTOF_RANGE);
return ex_expr::EXPR_ERROR;
}
Attributes *ResultAttr = getOperand(0);
Attributes *SourceAttr = getOperand(1);
Lng32 size = ResultAttr->getStorageLength();
char *SourceElemPtr = &op_data[1][(index * size) + sizeof(Lng32)];
// NULL Processing...
if(elemNullInd_) {
// A pointer to the null indicators of the operands.
char **ResultNullData = &op_data[-2 * ex_clause::MAX_OPERANDS];
char *SourceElemIndPtr = SourceElemPtr;
SourceElemPtr += SourceAttr->getNullIndicatorLength();
// Set the indicator
if (ResultAttr->getNullFlag()) {
str_cpy_all(ResultNullData[0], SourceElemIndPtr,
SourceAttr->getNullIndicatorLength());
}
if ( ExpTupleDesc::isNullValue( SourceElemIndPtr,
SourceAttr->getNullBitIndex(),
SourceAttr->getTupleFormat() ) )
{
// The value is NULL, return since we do not need to extract the data.
return ex_expr::EXPR_NULL;
}
}
// For SQLVarChars, we have to copy both length and value fields.
// op_data[-ex_clause::MAX_OPERANDS] points to the length field of the
// SQLVarChar;
// The size of the field is sizeof(short) for rowset SQLVarChars.
if(SourceAttr->getVCIndicatorLength() > 0){
str_cpy_all((char*)op_data[-ex_clause::MAX_OPERANDS],
(char*)(&op_data[-ex_clause::MAX_OPERANDS+1][index*size]),
SourceAttr->getVCIndicatorLength()); //sizeof(short));
SourceElemPtr += SourceAttr->getVCIndicatorLength();
str_cpy_all(op_data[0], SourceElemPtr, size - SourceAttr->getVCIndicatorLength());
}
else {
// Note we do not have variable length for host variables. But we may not
// need to copy the whole length for strings.
str_cpy_all(op_data[0], SourceElemPtr, size);
}
return ex_expr::EXPR_OK;
}
Long ExRowsetArrayScan::pack(void * space)
{
return packClause(space, sizeof(ExRowsetArrayScan));
}
Long ExRowsetArrayRowid::pack(void * space)
{
return packClause(space, sizeof(ExRowsetArrayRowid));
}
Long ExRowsetArrayInto::pack(void * space)
{
return packClause(space, sizeof(ExRowsetArrayInto));
}
// ExRowsetArrayRowid::eval() ------------------------------------------
// The ExRowsetArrayRowid clause returns the value of the current index
ex_expr::exp_return_type
ExRowsetArrayRowid::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to the result
// op_data[1] points to the array
// op_data[2] points to the index
// The width of each data item in bytes
Lng32 index = *(Lng32 *)op_data[2];
if (index < 0 || index >= maxNumElem_)
{
// The index cannot be greater than the dimension of the array
// It is likely that there was an item expression evaluated at
// execution time to obtain the rowsetSize which is greater than
// the maximum allowed.
ExRaiseSqlError(heap, diagsArea, EXE_ROWSET_INDEX_OUTOF_RANGE);
**diagsArea << DgSqlCode(-EXE_ROWSET_INDEX_OUTOF_RANGE);
return ex_expr::EXPR_ERROR;
}
// Note we do not have variable length for host variables. But we may not
// need to copy the whole length for strings.
str_cpy_all(op_data[0], (char *)&index, sizeof(index));
return ex_expr::EXPR_OK;
}
// ExRowsetArrayInto::eval() ------------------------------------------
// The ExRowsetArrayInto clause appends a value into the Rowset array
// The size of the element is known at compile time
ex_expr::exp_return_type
ExRowsetArrayInto::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to the array (Result)
// op_data[1] points to the value to insert
// op_data[2] points to the rowset size expression
Lng32 runtimeMaxNumElem = *(Lng32 *)op_data[2];
if (numElem_ >= runtimeMaxNumElem || numElem_ >= maxNumElem_) {
// Overflow, we cannot add more elements to this rowset array
ExRaiseSqlError(heap, diagsArea, EXE_ROWSET_OVERFLOW);
**diagsArea << DgSqlCode(-EXE_ROWSET_OVERFLOW);
return ex_expr::EXPR_ERROR;
}
// Get number of rows stored in the array
Lng32 nrows;
str_cpy_all((char*)&nrows,op_data[0],sizeof(Lng32));
if (nrows >= runtimeMaxNumElem || nrows >= maxNumElem_) {
// Overflow, we cannot add more elements to this rowset array
ExRaiseSqlError(heap, diagsArea, EXE_ROWSET_OVERFLOW);
**diagsArea << DgSqlCode(-EXE_ROWSET_OVERFLOW);
return ex_expr::EXPR_ERROR;
}
Attributes *resultAttr = getOperand(0);
NABoolean resultIsNull = FALSE;
char *sourceNullData = op_data[-2 * ex_clause::MAX_OPERANDS + 1];
Attributes *sourceAttr = getOperand(1);
Lng32 elementSize = ((SimpleType *) resultAttr)->getStorageLength();
char *resultElemPtr = &op_data[0][(nrows * elementSize) +
sizeof (Lng32)];
// NULL Processing...
if (elemNullInd_) {
char *resultElemIndPtr = resultElemPtr;
// Set the indicator
if (sourceAttr->getNullFlag() && sourceNullData == 0) {
ExpTupleDesc::setNullValue(resultElemIndPtr,
resultAttr->getNullBitIndex(),
resultAttr->getTupleFormat());
resultIsNull = TRUE;
} else {
ExpTupleDesc::clearNullValue(resultElemIndPtr,
resultAttr->getNullBitIndex(),
resultAttr->getTupleFormat());
}
} else if (sourceAttr->getNullFlag() && sourceNullData == 0) {
// Source is null, but we do not have a way to express it
ExRaiseSqlError(heap, diagsArea, EXE_MISSING_INDICATOR_VARIABLE);
**diagsArea << DgSqlCode(-EXE_MISSING_INDICATOR_VARIABLE);
return ex_expr::EXPR_ERROR;
}
// Copy the result if not null
// For SQLVarChars, copy both val and len fields.
if (resultIsNull == FALSE){
if (DFS2REC::isSQLVarChar(resultAttr->getDatatype())) {
unsigned short VCLen = 0;
str_cpy_all((char *) &VCLen,
(char*)op_data[-ex_clause::MAX_OPERANDS + 1],
resultAttr->getVCIndicatorLength());
str_cpy_all( resultElemPtr+resultAttr->getNullIndicatorLength(),
(char *) &VCLen,
resultAttr->getVCIndicatorLength());
str_cpy_all(
resultElemPtr+resultAttr->getNullIndicatorLength()+
resultAttr->getVCIndicatorLength(),
op_data[1], VCLen);
}
else {
str_cpy_all(resultElemPtr + resultAttr->getNullIndicatorLength(),
op_data[1], resultAttr->getLength());
} // if isSQLVarChar
} // if resultIsNULL
// Update the number of elements in the object associated with the array
// and the array itself
nrows++;
str_cpy_all(op_data[0],(char*)&nrows,sizeof(Lng32));
return ex_expr::EXPR_OK;
}
// LCOV_EXCL_START
ex_expr::exp_return_type ex_function_nullifzero::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Attributes *tgtOp = getOperand(0);
char * tgt = op_data[0];
char * tgtNull = op_data[-2 * MAX_OPERANDS];
char * src = op_data[1];
Lng32 srcLen = getOperand(1)->getLength();
NABoolean resultIsNull = TRUE;
for (Int32 i = 0; i < srcLen; i++)
{
tgt[i] = src[i];
if (src[i] != 0)
{
resultIsNull = FALSE;
}
}
if (resultIsNull)
{
ExpTupleDesc::setNullValue(tgtNull,
tgtOp->getNullBitIndex(),
tgtOp->getTupleFormat());
}
else
{
ExpTupleDesc::clearNullValue(tgtNull,
tgtOp->getNullBitIndex(),
tgtOp->getTupleFormat());
}
return ex_expr::EXPR_OK;
}
// LCOV_EXCL_STOP
//
// NVL(e1, e2) returns e2 if e1 is NULL otherwise e1. NVL(e1, e2) is
// equivalent to ANSI/ISO
// COALESCE(e1, e2)
// or,
// CASE WHEN e1 IS NULL THEN e2 ELSE e1 END
// Both arguments can be nullable and actually null; they both can
// be constants as well.
// NVL() on CHAR type expressions is mapped to CASE. ISNULL(e1, e2) is
// mapped into NVL(e1, e2)
// Datatypes of e1 and e2 must be comparable/compatible.
//
ex_expr::exp_return_type ex_function_nvl::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
// Common index into op_data[] to access Null Indicators
Int32 opNullIdx = -2 * MAX_OPERANDS;
Attributes *tgtOp = getOperand(0);
Attributes *arg1 = getOperand(1);
Attributes *arg2 = getOperand(2);
char * tgt = op_data[0];
char * tgtNull = op_data[opNullIdx];
char * src;
UInt32 srcLen;
NABoolean resultIsNull = TRUE;
// As of today, NVL() on CHAR types becomes CASE. So make sure we are
// not dealing with any CHAR types
assert(!DFS2REC::isAnyCharacter(arg1->getDatatype()) &&
!DFS2REC::isAnyCharacter(arg2->getDatatype()));
// Locate the operand that is not null: if both are null
// resultIsNull will still be TRUE and we will just set the
// NULL flag of the result. If any operand is NOT NULL we copy
// that value into result and clear NULL flag of the result.
if (!arg1->getNullFlag() || op_data[opNullIdx + 1])
{
// First operand is either NOT NULLABLE or NON NULL Value.
// This is the result.
src = op_data[1];
srcLen = arg1->getLength();
resultIsNull = FALSE;
}
else
{
// Second operand could be the result, if it is not null.
src = op_data[2];
srcLen = arg2->getLength();
// Second operand is either NOT NULLABLE or NON NULL Value.
// This is the result.
if (!arg2->getNullFlag() || op_data[opNullIdx + 2])
resultIsNull = FALSE;
}
if (resultIsNull)
{
// Result must be nullable
assert(tgtOp->getNullFlag());
ExpTupleDesc::setNullValue(tgtNull,
tgtOp->getNullBitIndex(),
tgtOp->getTupleFormat());
}
else
{
// clear nullflag of result if it is nullable
if (tgtOp->getNullFlag())
ExpTupleDesc::clearNullValue(tgtNull,
tgtOp->getNullBitIndex(),
tgtOp->getTupleFormat());
}
// Copy src to result: this could be NULL
assert((UInt32)(tgtOp->getLength()) >= srcLen);
str_cpy_all(tgt, src, srcLen);
return ex_expr::EXPR_OK;
}
//
// Clause used to clear header bytes for both disk formats
// SQLMX_FORMAT and SQLMX_ALIGNED_FORMAT. The number of bytes to clear
// is different for both formats.
// This clause is only generated for insert expressions and update expressions
// (updates that are non-optimized since olt optimized updates do a strcpy
// of the old image and then update the specific columns).
ex_expr::exp_return_type ExHeaderClause::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
char *tgtData = op_data[0];
Attributes *tgtOp = getOperand(0);
// Clear the entire header (not the VOA area)
str_pad( tgtData, (Int32)adminSz_, '\0' );
if ( bitmapOffset_ > 0 )
((ExpAlignedFormat *)tgtData)->setBitmapOffset( bitmapOffset_ );
// Can not use the tgt attributes offset value here since for the aligned
// format this may not be the first fixed field since the fixed fields
// are re-ordered.
if ( isSQLMXAlignedFormat() )
((ExpAlignedFormat *)tgtData)->setFirstFixedOffset( firstFixedOffset_ );
else
ExpTupleDesc::setFirstFixedOffset( tgtData,
firstFixedOffset_,
tgtOp->getTupleFormat() );
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_queryid_extract::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Lng32 retcode = 0;
char * qidStr = op_data[1];
char * attrStr = op_data[2];
Lng32 qidLen = getOperand(1)->getLength();
Lng32 attrLen = getOperand(2)->getLength();
Lng32 attr = -999;
NABoolean isNumeric = FALSE;
// remove trailing blanks from attrStr
while (attrLen && attrStr[attrLen-1] == ' ')
attrLen--;
if (strncmp(attrStr, "SEGMENTNUM", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_SEGMENTNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "CPU", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_CPUNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "CPUNUM", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_CPUNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "PIN", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_PIN;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "EXESTARTTIME", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_EXESTARTTIME;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "SESSIONID", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_SESSIONID;
}
else if (strncmp(attrStr, "SESSIONNUM", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_SESSIONNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "USERNAME", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_USERNAME;
}
else if (strncmp(attrStr, "SESSIONNAME", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_SESSIONNAME;
}
else if (strncmp(attrStr, "QUERYNUM", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_QUERYNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "STMTNAME", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_STMTNAME;
}
Int64 value;
if (!isNumeric)
value = 99; // set max valueStr length
char valueStr[100];
retcode = ComSqlId::getSqlQueryIdAttr(
attr, qidStr, qidLen, value, valueStr);
if (retcode < 0)
{
ExRaiseFunctionSqlError(heap, diagsArea, (ExeErrorCode)(-retcode),
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
char * valPtr;
short datatype;
Lng32 length;
if (isNumeric)
{
valPtr = (char*)&value;
datatype = REC_BIN64_SIGNED;
length = 8;
}
else
{
valPtr = valueStr;
datatype = REC_BYTE_V_ANSI;
length = (Lng32)value + 1; // include null terminator
}
if (convDoIt(valPtr, length, datatype, 0, 0,
op_data[0],
getOperand(0)->getLength(),
getOperand(0)->getDatatype(),
getOperand(0)->getPrecision(),
getOperand(0)->getScale(),
op_data[-MAX_OPERANDS],
getOperand(0)->getVCIndicatorLength(),
heap, diagsArea))
return ex_expr::EXPR_ERROR;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionUniqueId::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Lng32 retcode = 0;
char * result = op_data[0];
Int64 uniqueUID;
ComUID comUID;
comUID.make_UID();
#if defined( NA_LITTLE_ENDIAN )
uniqueUID = reversebytes(comUID.get_value());
#else
uniqueUID = comUID.get_value();
#endif
str_cpy_all(result, (char*)&uniqueUID, sizeof(Int64));
str_pad(&result[sizeof(Int64)], sizeof(Int64), '\0');
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionRowNum::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
char * result = op_data[0];
Int64 rowNum = getExeGlobals()->rowNum();
str_cpy_all(result, (char*)&rowNum, sizeof(Int64));
str_pad(&result[sizeof(Int64)], sizeof(Int64), '\0');
return ex_expr::EXPR_OK;
}
short ExFunctionHbaseColumnLookup::extractColFamilyAndName(const char * input,
short len,
NABoolean isVarchar,
std::string &colFam, std::string &colName)
{
if (! input)
return -1;
Lng32 i = 0;
Lng32 startPos = 0;
if (isVarchar)
{
len = *(short*)input;
startPos = sizeof(len);
}
else if (len == -1)
{
len = strlen(input);
startPos = 0;
}
else
{
startPos = 0;
}
Lng32 j = 0;
i = startPos;
NABoolean colonFound = FALSE;
while ((j < len) && (not colonFound))
{
if (input[i] != ':')
{
i++;
}
else
{
colonFound = TRUE;
}
j++;
}
if (colonFound) // ":" found
{
colFam.assign(&input[startPos], i - startPos);
i++;
if (i < (startPos + len))
{
colName.assign(&input[i], (startPos + len) - i);
}
}
else
{
colName.assign(&input[startPos], i - startPos);
}
return 0;
}
ex_expr::exp_return_type
ExFunctionHbaseColumnLookup::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to result. The result is a varchar.
Attributes *resultAttr = getOperand(0);
Attributes *colDetailAttr = getOperand(1);
char * resultStart = op_data[0];
char * resultNull = op_data[-2 * MAX_OPERANDS];
char * result = resultStart;
char * colDetail = op_data[1];
Lng32 sourceLen = 0;
if (colDetailAttr->getVCIndicatorLength() == sizeof(Lng32))
str_cpy_all((char*)&sourceLen, op_data[-MAX_OPERANDS+1], sizeof(Lng32));
else
{
short tempLen = 0;
str_cpy_all((char*)&tempLen, op_data[-MAX_OPERANDS+1], sizeof(short));
sourceLen = tempLen;
}
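// The column-detail buffer parsed below is a packed list of entries,
// each laid out as:
// <2-byte column name length><name bytes><4-byte value length><value bytes>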
char * pos = colDetail;
NABoolean done = FALSE;
NABoolean colFound = FALSE;
while (NOT done)
{
short colNameLen = 0;
Lng32 colValueLen = 0;
memcpy((char*)&colNameLen, pos, sizeof(short));
pos += sizeof(short);
if ((colNameLen == strlen(colName_)) &&
(str_cmp(colName_, pos, colNameLen) == 0))
{
pos += colNameLen;
memcpy((char*)&colValueLen, pos, sizeof(Lng32));
pos += sizeof(Lng32);
NABoolean charType = DFS2REC::isAnyCharacter(resultAttr->getDatatype());
if (! charType)
{
// lengths must match for non-char types
if (colValueLen != resultAttr->getLength())
continue;
}
UInt32 flags = 0;
ex_expr::exp_return_type rc =
convDoIt(pos,
colValueLen,
(charType ? REC_BYTE_F_ASCII : resultAttr->getDatatype()),
(charType ? 0 : resultAttr->getPrecision()),
(charType ? 0 : resultAttr->getScale()),
result,
resultAttr->getLength(),
resultAttr->getDatatype(),
resultAttr->getPrecision(),
resultAttr->getScale(),
NULL,
0,
heap,
diagsArea);
if ((rc != ex_expr::EXPR_OK) ||
((diagsArea) && (*diagsArea) && ((*diagsArea)->getNumber(DgSqlCode::WARNING_)) > 0))
{
if (rc == ex_expr::EXPR_OK)
{
(*diagsArea)->negateAllWarnings();
}
return ex_expr::EXPR_ERROR;
}
getOperand(0)->setVarLength(colValueLen, op_data[-MAX_OPERANDS]);
colFound = TRUE;
done = TRUE;
}
else
{
pos += colNameLen;
memcpy((char*)&colValueLen, pos, sizeof(Lng32));
pos += sizeof(Lng32);
pos += colValueLen;
if (pos >= (colDetail + sourceLen))
{
done = TRUE;
}
}
} // while
if (NOT colFound)
{
// move null value to result
ExpTupleDesc::setNullValue(resultNull,
resultAttr->getNullBitIndex(),
resultAttr->getTupleFormat() );
}
else
{
ExpTupleDesc::clearNullValue(resultNull,
resultAttr->getNullBitIndex(),
resultAttr->getTupleFormat() );
}
return ex_expr::EXPR_OK;
}
NABoolean ExFunctionHbaseColumnsDisplay::toBeDisplayed(
char * colName, Lng32 colNameLen)
{
if ((! colNames()) || (numCols_ == 0))
return TRUE;
char * currColName = colNames();
for (Lng32 i = 0; i < numCols_; i++)
{
short currColNameLen = *(short*)currColName;
currColName += sizeof(short);
if ((colNameLen == currColNameLen) &&
(memcmp(colName, currColName, colNameLen) == 0))
return TRUE;
currColName += currColNameLen;
}
return FALSE;
}
ex_expr::exp_return_type
ExFunctionHbaseColumnsDisplay::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to result. The result is a varchar.
Attributes *resultAttr = getOperand(0);
Attributes *colDetailAttr = getOperand(1);
char * resultStart = op_data[0];
char * result = resultStart;
char * colDetail = op_data[1];
Lng32 sourceLen = 0;
if (colDetailAttr->getVCIndicatorLength() == sizeof(Lng32))
str_cpy_all((char*)&sourceLen, op_data[-MAX_OPERANDS+1], sizeof(Lng32));
else
{
short tempLen = 0;
str_cpy_all((char*)&tempLen, op_data[-MAX_OPERANDS+1], sizeof(short));
sourceLen = tempLen;
}
char * pos = colDetail;
NABoolean done = FALSE;
while (NOT done)
{
short colNameLen = 0;
Lng32 colValueLen = 0;
memcpy((char*)&colNameLen, pos, sizeof(short));
pos += sizeof(short);
memcpy(result, pos, colNameLen);
pos += colNameLen;
// if this column name needs to be returned, then return it.
if (NOT toBeDisplayed(result, colNameLen))
{
goto label_continue;
}
result += colNameLen;
memcpy(result, " => ", strlen(" => "));
result += strlen(" => ");
memcpy((char*)&colValueLen, pos, sizeof(Lng32));
pos += sizeof(Lng32);
memcpy(result, pos, colValueLen);
result += colValueLen;
pos += colValueLen;
if (pos < (colDetail + sourceLen))
{
memcpy(result, ", ", strlen(", "));
result += strlen(", ");
}
label_continue:
if (pos >= (colDetail + sourceLen))
{
done = TRUE;
}
}
// store the result length in the varlen indicator.
getOperand(0)->setVarLength((result-resultStart), op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionHbaseColumnCreate::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to result. The result is a varchar.
// Values in result have already been populated by clauses evaluated
// before this clause is reached.
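// The result layout written below: a fixed header
// <numEntries (2)><colNameMaxLen (2)><colValVCIndLen (2)><colValMaxLen (4)>
// followed, per entry, by a varchar column name padded to colNameMaxLen
// and a nullable varchar column value padded to colValMaxLen (both
// rounded to 2-byte boundaries). The loop below only validates the
// names and advances 'result' to compute the total length.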
Attributes *resultAttr = getOperand(0);
char * resultStart = op_data[0];
char * result = resultStart;
str_cpy_all(result, (char*)&numEntries_, sizeof(numEntries_));
result += sizeof(short);
str_cpy_all(result, (char*)&colNameMaxLen_, sizeof(colNameMaxLen_));
result += sizeof(short);
str_cpy_all(result, (char*)&colValVCIndLen_, sizeof(colValVCIndLen_));
result += sizeof(short);
str_cpy_all(result, (char*)&colValMaxLen_, sizeof(colValMaxLen_));
result += sizeof(Int32);
for (Lng32 i = 0; i < numEntries_; i++)
{
// validate that column name is of right format: colfam:colname
std::string colFam;
std::string colNam;
ExFunctionHbaseColumnLookup::extractColFamilyAndName(
result, -1, TRUE/*isVarchar*/, colFam, colNam);
if (colFam.empty())
{
short colNameLen;
str_cpy_all((char*)&colNameLen, result, sizeof(short));
result += sizeof(short);
std::string colNamData(result, colNameLen);
ExRaiseSqlError(heap, diagsArea, (ExeErrorCode)1426, NULL, NULL, NULL, NULL,
colNamData.data());
return ex_expr::EXPR_ERROR;
}
result += sizeof(short);
result += ROUND2(colNameMaxLen_);
// skip the nullable bytes
result += sizeof(short);
if (colValVCIndLen_ == sizeof(short))
result += sizeof(short);
else
{
result = (char*)ROUND4((Int64)result);
result += sizeof(Lng32);
}
result += ROUND2(colValMaxLen_);
}
resultAttr->setVarLength(result - resultStart, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionCastType::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to result.
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
char * resultData = op_data[0];
char * srcData = op_data[1];
Lng32 sourceLen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 resultLen = resultAttr->getLength();
if (sourceLen < resultLen)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
str_cpy_all(resultData, srcData, resultLen);
getOperand(0)->setVarLength(resultLen, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionSequenceValue::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
short rc = 0;
// op_data[0] points to result. The result is a varchar.
Attributes *resultAttr = getOperand(0);
char * result = op_data[0];
SequenceValueGenerator * seqValGen = getExeGlobals()->seqGen();
Int64 seqVal = 0;
if (isCurr())
rc = seqValGen->getCurrSeqVal(sga_, seqVal);
else
rc = seqValGen->getNextSeqVal(sga_, seqVal);
if (rc)
{
ExRaiseSqlError(heap, diagsArea, (ExeErrorCode)ABS(rc));
return ex_expr::EXPR_ERROR;
}
*(Int64*)result = seqVal;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionHbaseTimestamp::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
short rc = 0;
// op_data[0] points to result.
Attributes *resultAttr = getOperand(0);
char * result = op_data[0];
Int64 * hbaseTS = (Int64*)op_data[1];
*(Int64*)result = hbaseTS[colIndex_];
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionHbaseVersion::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
short rc = 0;
// op_data[0] points to result.
Attributes *resultAttr = getOperand(0);
char * result = op_data[0];
Int64 * hbaseVersion = (Int64*)op_data[1];
*(Int64*)result = hbaseVersion[colIndex_];
return ex_expr::EXPR_OK;
}
/////////////////////////////////////////////////////////////////
// ExAuditImage::eval()
// The ExAuditImage clause evaluates the auditRowImageExpr_ and
// stores the result from it into the result of ExAuditImage.
// auditRowImageExpr_ constructs the audit row image in SQLMX_FORMAT.
/////////////////////////////////////////////////////////////////
ex_expr::exp_return_type
ExAuditImage::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to result. The result is a varchar.
Attributes *resultAttr = getOperand(0);
// This cri desc generated at codeGen() time to generate audit row image
// has 4 entries: 0, for consts. 1, for temps.
// 2, for the audit row image.
// 3, where the input tuple in EXPLODED format will be available.
ex_cri_desc * auditImageWorkCriDesc = auditImageContainerExpr_->criDesc();
short auditImageAtpIndex = 2; // where audit row image will be built
//Allocate ATP to evaluate the auditRowImageExpr_
atp_struct *auditImageWorkAtp = auditImageContainerExpr_->getWorkAtp();
// Set the data pointer to point to the resultAttr's data space.
auditImageWorkAtp->getTupp(auditImageAtpIndex).setDataPointer(op_data[0]);
ex_expr * auditRowImageExpr = (ex_expr *)auditImageContainerExpr_->getExpr();
Attributes *inputs = getOperand(1);
// set the inputs tuple to point to the temp tuple
short inputsAtpIndex = 3; // where the inputs to audit row image
// is available.
auditImageWorkAtp->getTupp(inputsAtpIndex).setDataPointer(op_data[1]);
// Setting the auditRowImageLength to the length from the tuple descriptor.
// This length is computed during compile time.
// For a row with variable length field(s), the eval() method re-calculates
// the row length and modifies the auditRowImageLength variable.
ULng32 auditRowImageLength = auditImageWorkCriDesc->getTupleDescriptor(auditImageAtpIndex)->tupleDataLength();
ex_expr::exp_return_type retCode = ex_expr::EXPR_OK;
retCode = auditRowImageExpr->eval(auditImageWorkAtp, // Input - atp1
0, // None - atp2
NULL, // heap
-1, // datalen
&auditRowImageLength // Output
);
if (retCode == ex_expr::EXPR_ERROR)
{
*diagsArea = auditImageWorkAtp->getDiagsArea();
ExRaiseFunctionSqlError(heap, diagsArea, EXE_AUDIT_IMAGE_EXPR_EVAL_ERROR,
derivedFunction(),
origFunctionOperType());
return retCode;
}
// store the audit row image length in the varlen indicator.
getOperand(0)->setVarLength(auditRowImageLength, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
//
// decodeKeyValue
//
// This routine decodes an encoded key value.
//
// Note: The target MAY point to the source to change the original
// value.
//
////////////////////////////////////////////////////////////////////
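// The encoding undone here is order-preserving: values were stored so
// that a byte-wise comparison of encoded keys matches the datatype's
// ordering. Concretely (see the cases below): signed integers had the
// sign bit flipped and, on little-endian machines, bytes reversed to
// big-endian; negative floats were stored complemented; descending
// columns had every byte complemented; varchars were blank-padded to
// their maximum length.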
short ex_function_encode::decodeKeyValue(Attributes * attr,
NABoolean isDesc,
char *inSource,
char *varlen_ptr,
char *target,
char *target_varlen_ptr,
NABoolean handleNullability
)
{
Lng32 fsDatatype = attr->getDatatype();
Lng32 length = attr->getLength();
Lng32 precision = attr->getPrecision();
Lng32 encodedKeyLen = length;
if ((handleNullability) &&
(attr->getNullFlag()))
encodedKeyLen += attr->getNullIndicatorLength();
char * source = inSource;
if (isDesc)
{
// complement all bytes
for (Lng32 k = 0; k < encodedKeyLen; k++)
#pragma nowarn(1506) // warning elimination
target[k] = ~(source[k]);
#pragma warn(1506) // warning elimination
source = target;
}
if ((handleNullability) &&
(attr->getNullFlag()))
{
if (target != source)
str_cpy_all(target, source, attr->getNullIndicatorLength());
source += attr->getNullIndicatorLength();
target += attr->getNullIndicatorLength();
}
switch (fsDatatype) {
#if defined( NA_LITTLE_ENDIAN )
case REC_BIN8_SIGNED:
//
// Flip the sign bit.
//
*(UInt8*)target = *(UInt8*)source;
target[0] ^= 0200;
break;
case REC_BIN8_UNSIGNED:
case REC_BOOLEAN:
*(UInt8*)target = *(UInt8*)source;
break;
case REC_BIN16_SIGNED:
//
// Flip the sign bit.
//
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
target[sizeof(short)-1] ^= 0200;
break;
case REC_BPINT_UNSIGNED:
case REC_BIN16_UNSIGNED:
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
break;
case REC_BIN32_SIGNED:
//
// Flip the sign bit.
//
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
target[sizeof(Lng32)-1] ^= 0200;
break;
case REC_BIN32_UNSIGNED:
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
break;
case REC_BIN64_SIGNED:
//
// Flip the sign bit.
//
*((_int64 *) target) = reversebytes( *((_int64 *) source) );
target[sizeof(_int64)-1] ^= 0200;
break;
case REC_BIN64_UNSIGNED:
*((UInt64 *) target) = reversebytes( *((UInt64 *) source) );
break;
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND:
switch(length)
{
case 2: // Signed 16 bit
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
target[SQL_SMALL_SIZE-1] ^= 0200;
break;
case 4: // Signed 32 bit
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
target[SQL_INT_SIZE-1] ^= 0200;
break;
case 8: // Signed 64 bit
*((_int64 *) target) = reversebytes( *((_int64 *) source) );
target[SQL_LARGE_SIZE-1] ^= 0200;
break;
default:
assert(FALSE);
break;
}; // switch(length)
break;
case REC_DATETIME: {
// This method has been modified as part of the MP Datetime
// Compatibility project. It has been made more generic so that
// it depends only on the start and end fields of the datetime type.
//
rec_datetime_field startField;
rec_datetime_field endField;
ExpDatetime *dtAttr = (ExpDatetime *)attr;
// Get the start and end fields for this Datetime type.
//
dtAttr->getDatetimeFields(dtAttr->getPrecision(),
startField,
endField);
// Copy all of the source to the destination, then reverse only
// those fields of the target that are longer than 1 byte
//
if (target != source)
str_cpy_all(target, source, length);
// Reverse the YEAR and Fractional precision fields if present.
//
char *ptr = target;
for(Int32 field = startField; field <= endField; field++) {
switch (field) {
case REC_DATE_YEAR:
// convert YYYY from little endian to big endian
//
*((unsigned short *) ptr) = reversebytes( *((unsigned short *) ptr) );
ptr += sizeof(short);
break;
case REC_DATE_MONTH:
case REC_DATE_DAY:
case REC_DATE_HOUR:
case REC_DATE_MINUTE:
// One byte fields are copied as is...
ptr++;
break;
case REC_DATE_SECOND:
ptr++;
// if there is a fraction, make it big endian
// (it is an unsigned long, beginning after the SECOND field)
//
if (dtAttr->getScale() > 0)
*((ULng32 *) ptr) = reversebytes( *((ULng32 *) ptr) );
break;
}
}
break;
}
#else
case REC_BIN8_SIGNED:
case REC_BIN16_SIGNED:
case REC_BIN32_SIGNED:
case REC_BIN64_SIGNED:
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND:
//
// Flip the sign bit.
//
if (target != source)
str_cpy_all(target, source, length);
target[0] ^= 0200;
break;
#endif
case REC_DECIMAL_LSE:
//
// If the number was negative, complement all the bytes. Otherwise,
// clear the sign bit.
//
if (NOT(source[0] & 0200)) {
for (Lng32 i = 0; i < length; i++)
#pragma nowarn(1506) // warning elimination
target[i] = ~source[i];
#pragma warn(1506) // warning elimination
} else {
if (target != source)
str_cpy_all(target, source, length);
target[0] &= ~0200;
}
break;
case REC_NUM_BIG_SIGNED:
case REC_NUM_BIG_UNSIGNED: {
BigNum type(length, precision, 0, 0);
type.decode(source, target);
break;
}
case REC_IEEE_FLOAT32: {
//
// Encoded float (IEEE 754 - 1985 standard):
//
// +-+--------+-----------------------+
// | |Exponent| Mantissa |
// | |(8 bits)| (23 bits) |
// +-+--------+-----------------------+
// || |
// |+- Complemented if sign was neg.-+
// |
// +- Sign bit complement
//
// unencoded float (IEEE 754 - 1985 standard):
//
// +-+----------+---------------------+
// | | exponent | mantissa |
// | | (8 bits) | (23 bits) |
// +-+----------+---------------------+
// |
// +- Sign bit
//
// the following code is independent of the "endianess" of the
// architecture. Instead, it assumes IEEE 754 - 1985 standard
// for representation of floats
if (source[0] & 0200)
{
// sign bit is on. Indicates this was a positive number.
// Copy to target and clear the sign bit.
if (target != source)
str_cpy_all(target, source, length);
target[0] &= 0177;
}
else
{
// this was a negative number.
// flip all bits.
for (Lng32 i = 0; i < length; i++)
#pragma nowarn(1506) // warning elimination
target[i] = ~source[i];
#pragma warn(1506) // warning elimination
}
// here comes the dependent part
#ifdef NA_LITTLE_ENDIAN
*(ULng32 *) target = reversebytes(*(ULng32 *)target);
#endif
break;
}
case REC_IEEE_FLOAT64: {
//
// Encoded double (IEEE 754 - 1985 standard):
//
// +-+-----------+--------------------+
// | | Exponent | Mantissa |
// | | (11 bits) | (52 bits) |
// +-+-----------+--------------------+
// || |
// |+- Complemented if sign was neg.-+
// |
// +- Sign bit complement
//
// unencoded double (IEEE 754 - 1985 standard):
//
// +-+--------- -+--------------------+
// | | exponent | mantissa |
// | | (11 bits) | (52 bits) |
// +-+--------- -+--------------------+
// |
// +- Sign bit
//
// the following code is independent of the "endianess" of the
// architecture. Instead, it assumes IEEE 754 - 1985 standard
// for representation of floats
if (source[0] & 0200)
{
// sign bit is on. Indicates this was a positive number.
// Copy to target and clear the sign bit.
if (target != source)
str_cpy_all(target, source, length);
target[0] &= 0177;
}
else
{
// this was a negative number.
// flip all bits.
for (Lng32 i = 0; i < length; i++)
#pragma nowarn(1506) // warning elimination
target[i] = ~source[i];
#pragma warn(1506) // warning elimination
}
// here comes the dependent part
#ifdef NA_LITTLE_ENDIAN
*(Int64 *) target = reversebytes(*(Int64 *)target);
#endif
break;
}
case REC_BYTE_V_ASCII:
case REC_BYTE_V_ASCII_LONG: {
//
// Copy the source to the target.
//
short vc_len;
// See bug LP 1444134, make this compatible with encoding for
// varchars and remove the VC indicator
assert(attr->getVCIndicatorLength() == sizeof(vc_len));
str_cpy_all((char *) &vc_len, varlen_ptr, attr->getVCIndicatorLength());
if (target != source)
str_cpy_all(target, source, vc_len);
//
// Blankpad the target (if needed).
//
if (vc_len < length)
str_pad(&target[vc_len], (Int32) (length - vc_len), ' ');
//
// Make the length bytes to be the maximum length for this field. This
// will make all encoded varchar keys to have the same length and so the
// comparison will depend on the fixed part of the varchar buffer.
//
vc_len = (short) length;
if (target_varlen_ptr)
str_cpy_all(target_varlen_ptr, (char *) &vc_len, attr->getVCIndicatorLength());
break;
}
case REC_NCHAR_V_UNICODE:
{
//
// Copy the source to the target.
//
// See bug LP 1444134, make this compatible with encoding for
// varchars and remove the VC indicator
short vc_len;
assert(attr->getVCIndicatorLength() == sizeof(vc_len));
str_cpy_all((char *) &vc_len, varlen_ptr, attr->getVCIndicatorLength());
if (target != source)
str_cpy_all(target, source, vc_len);
//
// Blankpad the target (if needed).
//
if (vc_len < length)
wc_str_pad((NAWchar*)&target[attr->getVCIndicatorLength() + vc_len],
(Int32) (length - vc_len)/sizeof(NAWchar), unicode_char_set::space_char());
#if defined( NA_LITTLE_ENDIAN )
wc_swap_bytes((NAWchar*)&target[attr->getVCIndicatorLength()], length/sizeof(NAWchar));
#endif
//
// Make the length bytes to be the maximum length for this field. This
// will make all encoded varchar keys to have the same length and so the
// comparison will depend on the fixed part of the varchar buffer.
//
vc_len = (short) length;
if (target_varlen_ptr)
str_cpy_all(target_varlen_ptr, (char *) &vc_len, attr->getVCIndicatorLength());
break;
}
case REC_NCHAR_F_UNICODE:
{
if (target != source)
str_cpy_all(target, source, length);
#if defined( NA_LITTLE_ENDIAN )
wc_swap_bytes((NAWchar*)target, length/sizeof(NAWchar));
#endif
break;
}
default:
//
// Decoding is not needed. Just copy the source to the target.
//
if (target != source)
str_cpy_all(target, source, length);
break;
}
return 0;
}
NA_EIDPROC
static Lng32 convAsciiLength(Attributes * attr)
{
Lng32 d_len = 0;
Int32 scale_len = 0;
Lng32 datatype = attr->getDatatype();
Lng32 length = attr->getLength();
Lng32 precision = attr->getPrecision();
Lng32 scale = attr->getScale();
if (scale > 0)
scale_len = 1;
switch (datatype)
{
case REC_BPINT_UNSIGNED:
// Can set the display size based on precision. For now treat it as
// unsigned smallint
d_len = SQL_USMALL_DISPLAY_SIZE;
break;
case REC_BIN16_SIGNED:
d_len = SQL_SMALL_DISPLAY_SIZE + scale_len;
break;
case REC_BIN16_UNSIGNED:
d_len = SQL_USMALL_DISPLAY_SIZE + scale_len;
break;
case REC_BIN32_SIGNED:
d_len = SQL_INT_DISPLAY_SIZE + scale_len;
break;
case REC_BIN32_UNSIGNED:
d_len = SQL_UINT_DISPLAY_SIZE + scale_len;
break;
case REC_BIN64_SIGNED:
d_len = SQL_LARGE_DISPLAY_SIZE + scale_len;
break;
case REC_BIN64_UNSIGNED:
d_len = SQL_ULARGE_DISPLAY_SIZE + scale_len;
break;
case REC_NUM_BIG_UNSIGNED:
case REC_NUM_BIG_SIGNED:
d_len = precision + 1 + scale_len; // Precision + sign + decimal point
break;
case REC_BYTE_F_ASCII:
d_len = length;
break;
case REC_NCHAR_F_UNICODE:
case REC_NCHAR_V_UNICODE:
case REC_BYTE_V_ASCII:
case REC_BYTE_V_ASCII_LONG:
d_len = length;
break;
case REC_DECIMAL_UNSIGNED:
d_len = length + scale_len;
break;
case REC_DECIMAL_LSE:
d_len = length + 1 + scale_len;
break;
case REC_FLOAT32:
d_len = SQL_REAL_DISPLAY_SIZE;
break;
case REC_FLOAT64:
d_len = SQL_DOUBLE_PRECISION_DISPLAY_SIZE;
break;
case REC_DATETIME:
switch (precision) {
// add different literals for sqldtcode_date...etc. These literals
// are from sqlcli.h and cannot be included here in this file.
case 1 /*SQLDTCODE_DATE*/:
{
d_len = DATE_DISPLAY_SIZE;
}
break;
case 2 /*SQLDTCODE_TIME*/:
{
d_len = TIME_DISPLAY_SIZE +
(scale > 0 ? (1 + scale) : 0);
}
break;
case 3 /*SQLDTCODE_TIMESTAMP*/:
{
d_len = TIMESTAMP_DISPLAY_SIZE +
(scale > 0 ? (1 + scale) : 0);
}
break;
default:
d_len = length;
break;
}
break;
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND: {
rec_datetime_field startField;
rec_datetime_field endField;
ExpInterval::getIntervalStartField(datatype, startField);
ExpInterval::getIntervalEndField(datatype, endField);
// this code is copied from IntervalType::getStringSize in
// w:/common/IntervalType.cpp
d_len = 1 + 1 +
precision +
3/*IntervalFieldStringSize*/ * (endField - startField);
if (scale)
d_len += scale + 1; // 1 for "."
}
break;
default:
d_len = length;
break;
}
return d_len;
}
// LCOV_EXCL_START
// This is a function clause used by INTERPRET_AS_ROW to extract specific
// columns as specified by an extraction column list from an audit row image
// (compressed or uncompressed). The input to this function is an audit row
// image, possibly a modified field map (for compressed audit), and a list
// of columns to extract. The output is a tuple in exploded format made up of
// the extracted columns.
ExFunctionExtractColumns::ExFunctionExtractColumns
(OperatorTypeEnum operType,
short numOperands,
Attributes ** attr,
Space *space,
UInt32 compressedAudit,
ULng32 *extractColList,
ULng32 encodedKeyLength,
ExpTupleDesc *auditImageTupleDesc,
ExpTupleDesc *extractedRowTupleDesc)
: ex_function_clause(operType, numOperands, attr, space)
{
if (numOperands == 2)
flags_.mfMapIsNullConst_ = 1;
flags_.auditCompressionFlag_ = compressedAudit;
extractColList_ = (Lng32 *)extractColList;
numColsToExtract_ = extractedRowTupleDesc->numAttrs();
auditRowImageDesc_ = auditImageTupleDesc;
extractedRowDesc_ = extractedRowTupleDesc;
encodedKeyLen_ = encodedKeyLength;
}
ExFunctionExtractColumns::ExFunctionExtractColumns(void)
{
}
// This is the function that does the bulk of the work of extracting
// columns from an audit row image (compressed or uncompressed) as part
// of the work done by the function INTERPRET_AS_ROW.
ex_expr::exp_return_type ExFunctionExtractColumns::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
return ex_expr::EXPR_ERROR;
}
Long ExFunctionExtractColumns::pack (void * space)
{
auditRowImageDesc_.pack(space);
extractedRowDesc_.pack(space);
extractColList_.pack(space);
return packClause(space, sizeof(ExFunctionExtractColumns));
}
Lng32 ExFunctionExtractColumns::unpack (void * base, void * reallocator)
{
if (auditRowImageDesc_.unpack(base, reallocator)) return -1;
if (extractedRowDesc_.unpack(base, reallocator)) return -1;
if (extractColList_.unpack(base)) return -1;
return unpackClause(base, reallocator);
}
// Helper function: parse a string as an IPv4 address. Returns 1 if
// valid, 0 otherwise. Leading and trailing (padding) spaces are allowed.
static Lng32 string2ipv4(char *srcData, Lng32 slen, unsigned int *inet_addr)
{
Int16 i = 0, j = 0 , p=0, leadingspace=0;
char buf[16];
Int16 dot=0;
if(slen < MIN_IPV4_STRING_LEN )
return 0;
unsigned char *ipv4_bytes= (unsigned char *)inet_addr;
if(srcData[0] == ' ')
{
char * next = srcData;
while (*next == ' ')
{
leadingspace++;
next++;
}
}
for(i=leadingspace , j = 0; i < slen ; i++)
{
if(srcData[i] == '.')
{
buf[j]=0;
p = str_atoi(buf, j);
if( p < 0 || p > 255 || j == 0)
{
return 0;
}
else
{
if(ipv4_bytes)
ipv4_bytes[dot] = (unsigned char)p;
}
j = 0;
dot++;
if(dot > 3) return 0;
}
else if(srcData[i] == ' ')
{
break; //space is terminator
}
else
{
if(isdigit(srcData[i]) == 0)
{
return 0;
}
else
buf[j] = srcData[i];
j++;
}
}
Int16 stoppos=i;
// the last part
buf[j]=0; //null terminator
for(i = 0; i < j; i ++) //check for invalid character
{
if(isdigit(buf[i]) == 0)
{
return 0;
}
}
p = str_atoi(buf, j);
if( p < 0 || p > 255 || j == 0) // check for invalid number
{
return 0;
}
else
{
if(ipv4_bytes)
ipv4_bytes[dot] = (unsigned char)p;
}
//if terminated by space
if( stoppos < slen -1)
{
for(j = stoppos ; j < slen; j++)
{
if(srcData[j] != ' ') return 0;
}
}
if(dot != 3)
return 0;
else
return 1;
}
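// For illustration: string2ipv4("  10.1.2.3", 10, &addr) returns 1 and
// stores the octets 10, 1, 2, 3 into the four bytes of *inet_addr;
// string2ipv4("300.1.2.3", 9, NULL) returns 0 (octet out of range).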
ex_expr::exp_return_type ExFunctionInetAton::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
char * srcData = op_data[1];
char * resultData = op_data[0];
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
Lng32 slen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 rlen = resultAttr->getLength();
unsigned int addr;
int ret=string2ipv4(srcData, slen, &addr);
if(ret)
{
*(unsigned int *)op_data[0]=addr;
return ex_expr::EXPR_OK;
}
else
{
ExRaiseSqlError(heap, diags, EXE_INVALID_CHARACTER);
*(*diags) << DgString0("IP format") << DgString1("INET_ATON FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
ex_expr::exp_return_type ExFunctionInetNtoa::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
char buf[16]; //big enough
unsigned long addr = *(unsigned long*)op_data[1];
char * resultData = op_data[0];
Attributes *resultAttr = getOperand(0);
const unsigned char *ipv4_bytes= (const unsigned char *) &addr;
if( addr > 4294967295 )
{
ExRaiseSqlError(heap, diags, EXE_BAD_ARG_TO_MATH_FUNC);
*(*diags) << DgString0("INET_NTOA");
return ex_expr::EXPR_ERROR;
}
str_sprintf(buf, "%d.%d.%d.%d",
ipv4_bytes[0], ipv4_bytes[1], ipv4_bytes[2], ipv4_bytes[3]);
int slen = str_len(buf);
str_cpy_all(resultData, buf, slen);
getOperand(0)->setVarLength(slen, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionIsIP::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
char * resultData = op_data[0];
char * srcData = op_data[1];
Int16 i = 0, j = 0 , p=0;
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
Lng32 slen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 rlen = resultAttr->getLength();
if(getOperType() == ITM_ISIPV4)
{
if(string2ipv4(srcData, slen, NULL) == 0)
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
else
{
*(Int16 *)op_data[0] = 1;
return ex_expr::EXPR_OK;
}
}
else
{
Int16 gapcounter = 0, portidx = 0;
char portion[IPV6_PARTS_NUM][MAX_IPV6_STRING_LEN + 1];
char trimdata[MAX_IPV6_STRING_LEN + 1];
str_pad(trimdata,MAX_IPV6_STRING_LEN + 1, 0);
if(slen < MIN_IPV6_STRING_LEN )
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
char *ptr= srcData;
//cannot start with single :
if (*ptr == ':')
{
if (*(ptr+1) != ':')
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
}
else if (*ptr == ' ')
{
while(*ptr==' ') ptr++;
}
char * start=ptr;
if(slen - (srcData - ptr) > MAX_IPV6_STRING_LEN ) // must be padding space
{
if( start[MAX_IPV6_STRING_LEN] != ' ')
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
else {
for(j = MAX_IPV6_STRING_LEN; j >=0; j--)
{
if(ptr[j] != ' ') //stop, j is the last non-space char
break;
}
str_cpy_all(trimdata,start, j);
start = trimdata;
}
}
char ipv4[MAX_IPV6_STRING_LEN + 1];
j = 0;
int ipv4idx = 0;
// try to split the string into portions delimited by ':'
// also check for '::' (call it a gap); at most 1 gap is allowed
// if there is a gap, the portion count can be smaller than 8
// without a gap, the portion count must be 8
// each portion must be a 16-bit integer in HEX format
// leading 0s can be omitted
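// e.g. the full form "2001:db8:0:0:8:800:200c:417a", the gap form
// "2001:db8::8:800:200c:417a", and a mixed IPv4 tail such as
// "64:ff9b::10.0.0.1" are the shapes handled below; more than one
// gap (as in "1::2::3") is rejected via gapcounter.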
for(i = 0; i< slen; i++)
{
if(start[i] == ':')
{
portion[portidx][j] = 0; //set the terminator
if(start[i+1] == ':')
{
if(j != 0) //some characters are already saved into current portion
portidx++;
gapcounter++;
j = 0; //reset temp buffer pointer
i++;
continue;
}
else
{
//new portion start
if( j == 0 )
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
portidx++;
j=0;
continue;
}
}
else if( start[i] == '.') //ipv4 mixed format
{
if( ipv4idx > 0 )
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
str_cpy_all(ipv4, portion[portidx],str_len(portion[portidx]));
if(strlen(start+i) < MAX_IPV4_STRING_LEN) //15 is the maximum IPV4 string length
str_cat((const char*)ipv4, start+i, ipv4);
else
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
if(string2ipv4(ipv4, strlen(ipv4), NULL) == 0)
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
else
{
ipv4idx = 2; //ipv4 use 2 portions, 32 bits
break; // ipv4 string must be the last portion
}
}
portion[portidx][j] = start[i];
j++;
}
if(gapcounter > 1 || portidx > IPV6_PARTS_NUM - 1)
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
else if(gapcounter ==0 && portidx+ipv4idx < IPV6_PARTS_NUM - 1)
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
//check each IPV6 portion
for(i =0; i < portidx ; i++)
{
int len = strlen(portion[i]);
if( len > 4) // only an IPv4 portion may be longer than 4 chars
{
if(ipv4idx == 0 || ((ipv4idx == 2) && ( i != portidx -1) ) ) // no IPV4 portion, or this is not the IPV4 portion
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
}
for(j = 0; j < len; j++)
{
if( (portion[i][j] >= 'A' && portion[i][j] <= 'F') ||
(portion[i][j] >= 'a' && portion[i][j] <= 'f') ||
(portion[i][j] >= '0' && portion[i][j] <= '9')
) //good
continue;
else
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
}
}
//everything is good, this is IPV6
*(Int16 *)op_data[0] = 1;
return ex_expr::EXPR_OK;
}
}
// LCOV_EXCL_STOP
#pragma warn(1506) // warning elimination
| 1 | 14,727 | I'm wondering why this isn't <openssl/md5.h>. Seems like one would have to copy the md5.h file into the source tree somewhere for this to compile cleanly. Maybe you meant to use angle brackets instead of quotes? | apache-trafodion | cpp |
@@ -102,7 +102,7 @@ class presence_of_all_elements_located(object):
def __call__(self, driver):
return _find_elements(driver, self.locator)
-class visibility_of_all_elements_located(object):
+class visibility_of_any_elements_located(object):
""" An expectation for checking that there is at least one element visible
on a web page.
locator is used to find the element | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import NoAlertPresentException
"""
* Canned "Expected Conditions" which are generally useful within webdriver
* tests.
"""
class title_is(object):
"""An expectation for checking the title of a page.
title is the expected title, which must be an exact match
returns True if the title matches, False otherwise."""
def __init__(self, title):
self.title = title
def __call__(self, driver):
return self.title == driver.title
class title_contains(object):
""" An expectation for checking that the title contains a case-sensitive
substring. title is the fragment of title expected
returns True when the title matches, False otherwise
"""
def __init__(self, title):
self.title = title
def __call__(self, driver):
return self.title in driver.title
class presence_of_element_located(object):
""" An expectation for checking that an element is present on the DOM
of a page. This does not necessarily mean that the element is visible.
locator - used to find the element
returns the WebElement once it is located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_element(driver, self.locator)
class visibility_of_element_located(object):
""" An expectation for checking that an element is present on the DOM of a
page and visible. Visibility means that the element is not only displayed
but also has a height and width that is greater than 0.
locator - used to find the element
returns the WebElement once it is located and visible
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
return _element_if_visible(_find_element(driver, self.locator))
except StaleElementReferenceException:
return False
class visibility_of(object):
""" An expectation for checking that an element, known to be present on the
DOM of a page, is visible. Visibility means that the element is not only
displayed but also has a height and width that is greater than 0.
element is the WebElement
returns the (same) WebElement once it is visible
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
return _element_if_visible(self.element)
def _element_if_visible(element, visibility=True):
return element if element.is_displayed() == visibility else False
class presence_of_all_elements_located(object):
""" An expectation for checking that there is at least one element present
on a web page.
locator is used to find the element
returns the list of WebElements once they are located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_elements(driver, self.locator)
class visibility_of_all_elements_located(object):
""" An expectation for checking that there is at least one element visible
on a web page.
locator is used to find the element
returns the list of WebElements once they are located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return [element for element in _find_elements(driver, self.locator) if _element_if_visible(element)]
class text_to_be_present_in_element(object):
""" An expectation for checking if the given text is present in the
specified element.
locator, text
"""
def __init__(self, locator, text_):
self.locator = locator
self.text = text_
def __call__(self, driver):
try:
element_text = _find_element(driver, self.locator).text
return self.text in element_text
except StaleElementReferenceException:
return False
class text_to_be_present_in_element_value(object):
"""
An expectation for checking if the given text is present in the element's
value.
locator, text
"""
def __init__(self, locator, text_):
self.locator = locator
self.text = text_
def __call__(self, driver):
try:
element_text = _find_element(driver,
self.locator).get_attribute("value")
if element_text:
return self.text in element_text
else:
return False
except StaleElementReferenceException:
return False
class frame_to_be_available_and_switch_to_it(object):
""" An expectation for checking whether the given frame is available to
switch to. If the frame is available it switches the given driver to the
specified frame.
"""
def __init__(self, locator):
self.frame_locator = locator
def __call__(self, driver):
try:
if isinstance(self.frame_locator, tuple):
driver.switch_to.frame(_find_element(driver,
self.frame_locator))
else:
driver.switch_to.frame(self.frame_locator)
return True
except NoSuchFrameException:
return False
class invisibility_of_element_located(object):
""" An Expectation for checking that an element is either invisible or not
present on the DOM.
locator used to find the element
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
return _element_if_visible(_find_element(driver, self.locator), False)
except (NoSuchElementException, StaleElementReferenceException):
# In the case of NoSuchElement, returns true because the element is
# not present in DOM. The try block checks if the element is present
# but is invisible.
# In the case of StaleElementReference, returns true because stale
# element reference implies that element is no longer visible.
return True
class element_to_be_clickable(object):
""" An Expectation for checking an element is visible and enabled such that
you can click it."""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
element = visibility_of_element_located(self.locator)(driver)
if element and element.is_enabled():
return element
else:
return False
class staleness_of(object):
""" Wait until an element is no longer attached to the DOM.
element is the element to wait for.
returns False if the element is still attached to the DOM, True otherwise.
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
try:
# Calling any method forces a staleness check
self.element.is_enabled()
return False
except StaleElementReferenceException as expected:
return True
class element_to_be_selected(object):
""" An expectation for checking the selection is selected.
element is WebElement object
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
return self.element.is_selected()
class element_located_to_be_selected(object):
"""An expectation for the element to be located is selected.
locator is a tuple of (by, path)"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_element(driver, self.locator).is_selected()
class element_selection_state_to_be(object):
""" An expectation for checking if the given element is selected.
element is WebElement object
is_selected is a Boolean.
"""
def __init__(self, element, is_selected):
self.element = element
self.is_selected = is_selected
def __call__(self, ignored):
return self.element.is_selected() == self.is_selected
class element_located_selection_state_to_be(object):
""" An expectation to locate an element and check if the selection state
specified is in that state.
locator is a tuple of (by, path)
is_selected is a boolean
"""
def __init__(self, locator, is_selected):
self.locator = locator
self.is_selected = is_selected
def __call__(self, driver):
try:
element = _find_element(driver, self.locator)
return element.is_selected() == self.is_selected
except StaleElementReferenceException:
return False
class alert_is_present(object):
""" Expect an alert to be present."""
def __init__(self):
pass
def __call__(self, driver):
try:
alert = driver.switch_to.alert
alert.text  # probe the alert; raises NoAlertPresentException if none is open
return alert
except NoAlertPresentException:
return False
def _find_element(driver, by):
"""Looks up an element. Logs and re-raises ``WebDriverException``
if thrown."""
try:
return driver.find_element(*by)
except NoSuchElementException as e:
raise e
except WebDriverException as e:
raise e
def _find_elements(driver, by):
try:
return driver.find_elements(*by)
except WebDriverException as e:
raise e
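# Illustrative usage only (not part of this module): each expectation above is
# a callable that WebDriverWait polls until it returns a truthy value or the
# wait times out. The locator and timeout below are hypothetical.
#
#   from selenium.webdriver.common.by import By
#   from selenium.webdriver.support.ui import WebDriverWait
#   from selenium.webdriver.support import expected_conditions as EC
#
#   element = WebDriverWait(driver, 10).until(
#       EC.presence_of_element_located((By.ID, "submit-button")))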
| 1 | 13,212 | shouldn't __call__ return a boolean? | SeleniumHQ-selenium | rb |
@@ -229,6 +229,16 @@ bool Mob::CastSpell(uint16 spell_id, uint16 target_id, CastingSlot slot,
return false;
}
+ if (IsEffectInSpell(spell_id, SE_Charm) && !PassCharmTargetRestriction(entity_list.GetMobID(target_id))) {
+ if (IsClient()) {
+ CastToClient()->SendSpellBarEnable(spell_id);
+ }
+ if (casting_spell_id && IsNPC()) {
+ CastToNPC()->AI_Event_SpellCastFinished(false, static_cast<uint16>(casting_spell_slot));
+ }
+ return false;
+ }
+
if (HasActiveSong() && IsBardSong(spell_id)) {
LogSpells("Casting a new song while singing a song. Killing old song [{}]", bardsong);
//Note: this does NOT tell the client | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2002 EQEMu Development Team (http://eqemu.org)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
General outline of spell casting process
1.
a) Client clicks a spell bar gem, ability, or item. client_process.cpp
gets the op and calls CastSpell() with all the relevant info including
cast time.
b) NPC does CastSpell() from AI
2.
a) CastSpell() determines there is a cast time and sets some state keeping
flags to be used to check the progress of casting and finish it later.
b) CastSpell() sees there's no cast time, and calls CastedSpellFinished()
Go to step 4.
3.
SpellProcess() notices that the spell casting timer which was set by
CastSpell() is expired, and calls CastedSpellFinished()
4.
CastedSpellFinished() checks some timed spell specific things, like
whether to interrupt or not, due to movement or melee. If successful
SpellFinished() is called.
5.
SpellFinished() checks some things like LoS, reagents, target and
figures out what's going to get hit by this spell based on its type.
6.
a) Single target spell, SpellOnTarget() is called.
b) AE spell, Entity::AESpell() is called.
c) Group spell, Group::CastGroupSpell()/SpellOnTarget() is called as
needed.
7.
SpellOnTarget() may or may not call SpellEffect() to cause effects to
the target
8.
If this was timed, CastedSpellFinished() will restore the client's
spell bar gems.
Most user code should call CastSpell(), with a 0 casting time if needed,
and not SpellFinished().
*/
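// Illustrative only, not part of the original file: per the outline above, a
// zero cast time skips step 3 and goes straight to CastedSpellFinished().
// Assuming the trailing parameters carry defaults in the Mob header, a
// scripted cast might look like this (the spell id is made up):
//
//   uint16 spell_id = 1234;                      // hypothetical spell
//   caster->CastSpell(spell_id, target->GetID(), // target entity id
//                     CastingSlot::Item,         // treated as a click effect
//                     0);                        // cast_time 0: finish now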
#include "../common/bodytypes.h"
#include "../common/classes.h"
#include "../common/global_define.h"
#include "../common/eqemu_logsys.h"
#include "../common/item_instance.h"
#include "../common/rulesys.h"
#include "../common/skills.h"
#include "../common/spdat.h"
#include "../common/string_util.h"
#include "../common/data_verification.h"
#include "../common/misc_functions.h"
#include "quest_parser_collection.h"
#include "string_ids.h"
#include "worldserver.h"
#include "fastmath.h"
#include <assert.h>
#include <math.h>
#include <algorithm>
#ifndef WIN32
#include <stdlib.h>
#include "../common/unix.h"
#endif
#ifdef _GOTFRAGS
#include "../common/packet_dump_file.h"
#endif
#ifdef BOTS
#include "bot.h"
#endif
#include "mob_movement_manager.h"
#include "client.h"
extern Zone* zone;
extern volatile bool is_zone_loaded;
extern WorldServer worldserver;
extern FastMath g_Math;
using EQ::spells::CastingSlot;
// this is run constantly for every mob
void Mob::SpellProcess()
{
// check the rapid recast prevention timer
if(delaytimer == true && spellend_timer.Check())
{
spellend_timer.Disable();
delaytimer = false;
return;
}
// a timed spell is finished casting
if (casting_spell_id != 0 && casting_spell_checks && spellend_timer.Check())
{
spellend_timer.Disable();
delaytimer = false;
CastedSpellFinished(casting_spell_id, casting_spell_targetid, casting_spell_slot,
casting_spell_mana, casting_spell_inventory_slot, casting_spell_resist_adjust);
}
}
void NPC::SpellProcess()
{
Mob::SpellProcess();
if (swarm_timer.Check()) {
DepopSwarmPets();
}
}
///////////////////////////////////////////////////////////////////////////////
// functions related to begin/finish casting, fizzling etc
//
// only CastSpell and DoCastSpell should be setting casting_spell_id.
// basically casting_spell_id is only set when casting a triggered spell from
// the spell bar gems, an ability, or an item. note that it's actually set
// even if it's a 0 cast time, but then the spell is finished right after and
// it's unset. this is ok, since the 0 cast time spell is still a triggered
// one.
// the rule is you can cast one triggered (usually timed) spell at a time
// but things like SpellFinished() can run concurrent with a triggered cast
// to allow procs to work
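// Illustrative consequence of the rule above: while casting_spell_id is set,
// a second CastSpell() call fails the guard at the top of the function and
// returns false, yet a weapon proc can still land through SpellFinished()
// because procs never touch the casting_spell_* state.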
bool Mob::CastSpell(uint16 spell_id, uint16 target_id, CastingSlot slot,
int32 cast_time, int32 mana_cost, uint32* oSpellWillFinish, uint32 item_slot,
uint32 timer, uint32 timer_duration, int16 *resist_adjust,
uint32 aa_id)
{
LogSpells("CastSpell called for spell [{}] ([{}]) on entity [{}], slot [{}], time [{}], mana [{}], from item slot [{}]",
(IsValidSpell(spell_id))?spells[spell_id].name:"UNKNOWN SPELL", spell_id, target_id, static_cast<int>(slot), cast_time, mana_cost, (item_slot==0xFFFFFFFF)?999:item_slot);
if(casting_spell_id == spell_id)
ZeroCastingVars();
if
(
!IsValidSpell(spell_id) ||
casting_spell_id ||
delaytimer ||
spellend_timer.Enabled() ||
IsStunned() ||
IsFeared() ||
IsMezzed() ||
(IsSilenced() && !IsDiscipline(spell_id)) ||
(IsAmnesiad() && IsDiscipline(spell_id))
)
{
LogSpells("Spell casting canceled: not able to cast now. Valid? [{}], casting [{}], waiting? [{}], spellend? [{}], stunned? [{}], feared? [{}], mezed? [{}], silenced? [{}], amnesiad? [{}]",
IsValidSpell(spell_id), casting_spell_id, delaytimer, spellend_timer.Enabled(), IsStunned(), IsFeared(), IsMezzed(), IsSilenced(), IsAmnesiad() );
if(IsSilenced() && !IsDiscipline(spell_id))
MessageString(Chat::Red, SILENCED_STRING);
if(IsAmnesiad() && IsDiscipline(spell_id))
MessageString(Chat::Red, MELEE_SILENCE);
if(IsClient())
CastToClient()->SendSpellBarEnable(spell_id);
if(casting_spell_id && IsNPC())
CastToNPC()->AI_Event_SpellCastFinished(false, static_cast<uint16>(casting_spell_slot));
return(false);
}
//It appears that the Sanctuary effect is removed by a check on the client side (keep this however for redundancy)
if (spellbonuses.Sanctuary && ((spells[spell_id].targettype != ST_Self && GetTarget() != this) || IsDetrimentalSpell(spell_id)))
BuffFadeByEffect(SE_Sanctuary);
if(IsClient()){
int chance = CastToClient()->GetFocusEffect(focusFcMute, spell_id);//Client only
if (zone->random.Roll(chance)) {
MessageString(Chat::Red, SILENCED_STRING);
if(IsClient())
CastToClient()->SendSpellBarEnable(spell_id);
return(false);
}
}
if(IsDetrimentalSpell(spell_id) && !zone->CanDoCombat()){
MessageString(Chat::Red, SPELL_WOULDNT_HOLD);
if(IsClient())
CastToClient()->SendSpellBarEnable(spell_id);
if(casting_spell_id && IsNPC())
CastToNPC()->AI_Event_SpellCastFinished(false, static_cast<uint16>(casting_spell_slot));
return(false);
}
//cannot cast under divine aura
if(DivineAura()) {
LogSpells("Spell casting canceled: cannot cast while Divine Aura is in effect");
InterruptSpell(173, 0x121, 0);
return(false);
}
if (spellbonuses.NegateIfCombat)
BuffFadeByEffect(SE_NegateIfCombat);
if (IsClient() && IsHarmonySpell(spell_id) && !HarmonySpellLevelCheck(spell_id, entity_list.GetMobID(target_id))) {
InterruptSpell(SPELL_NO_EFFECT, 0x121, spell_id);
return false;
}
if (HasActiveSong() && IsBardSong(spell_id)) {
LogSpells("Casting a new song while singing a song. Killing old song [{}]", bardsong);
//Note: this does NOT tell the client
_StopSong();
}
//Added to prevent MQ2 exploitation of equipping normally-unequippable/clickable items with effects and clicking them for benefits.
if(item_slot && IsClient() && (slot == CastingSlot::Item || slot == CastingSlot::PotionBelt))
{
EQ::ItemInstance *itm = CastToClient()->GetInv().GetItem(item_slot);
int bitmask = 1;
bitmask = bitmask << (CastToClient()->GetClass() - 1);
if( itm && itm->GetItem()->Classes != 65535 ) {
if ((itm->GetItem()->Click.Type == EQ::item::ItemEffectEquipClick) && !(itm->GetItem()->Classes & bitmask)) {
if (CastToClient()->ClientVersion() < EQ::versions::ClientVersion::SoF) {
// They are casting a spell from an item that requires equipping but shouldn't let them equip it
LogError("HACKER: [{}] (account: [{}]) attempted to click an equip-only effect on item [{}] (id: [{}]) which they shouldn't be able to equip!",
CastToClient()->GetCleanName(), CastToClient()->AccountName(), itm->GetItem()->Name, itm->GetItem()->ID);
database.SetHackerFlag(CastToClient()->AccountName(), CastToClient()->GetCleanName(), "Clicking equip-only item with an invalid class");
}
else {
MessageString(Chat::Red, MUST_EQUIP_ITEM);
}
return(false);
}
if ((itm->GetItem()->Click.Type == EQ::item::ItemEffectClick2) && !(itm->GetItem()->Classes & bitmask)) {
if (CastToClient()->ClientVersion() < EQ::versions::ClientVersion::SoF) {
// They are casting a spell from an item that they don't meet the race/class requirements to cast
LogError("HACKER: [{}] (account: [{}]) attempted to click a race/class restricted effect on item [{}] (id: [{}]) which they shouldn't be able to click!",
CastToClient()->GetCleanName(), CastToClient()->AccountName(), itm->GetItem()->Name, itm->GetItem()->ID);
database.SetHackerFlag(CastToClient()->AccountName(), CastToClient()->GetCleanName(), "Clicking race/class restricted item with an invalid class");
}
else {
if (CastToClient()->ClientVersion() >= EQ::versions::ClientVersion::RoF)
{
// Line 181 in eqstr_us.txt was changed in RoF+
Message(Chat::Yellow, "Your race, class, or deity cannot use this item.");
}
else
{
MessageString(Chat::Red, CANNOT_USE_ITEM);
}
}
return(false);
}
}
if (itm && (itm->GetItem()->Click.Type == EQ::item::ItemEffectEquipClick) && item_slot > EQ::invslot::EQUIPMENT_END){
if (CastToClient()->ClientVersion() < EQ::versions::ClientVersion::SoF) {
// They are attempting to cast a must equip clicky without having it equipped
LogError("HACKER: [{}] (account: [{}]) attempted to click an equip-only effect on item [{}] (id: [{}]) without equiping it!", CastToClient()->GetCleanName(), CastToClient()->AccountName(), itm->GetItem()->Name, itm->GetItem()->ID);
database.SetHackerFlag(CastToClient()->AccountName(), CastToClient()->GetCleanName(), "Clicking equip-only item without equiping it");
}
else {
MessageString(Chat::Red, MUST_EQUIP_ITEM);
}
return(false);
}
}
std::string export_string = fmt::format("{}", spell_id);
if(IsClient()) {
if (parse->EventPlayer(EVENT_CAST_BEGIN, CastToClient(), export_string, 0) != 0) {
return false;
}
} else if(IsNPC()) {
parse->EventNPC(EVENT_CAST_BEGIN, CastToNPC(), nullptr, export_string, 0);
}
//To prevent NPC ghosting when spells are cast from scripts
if (IsNPC() && IsMoving() && cast_time > 0) {
StopNavigation();
}
if(resist_adjust)
{
return(DoCastSpell(spell_id, target_id, slot, cast_time, mana_cost, oSpellWillFinish, item_slot, timer, timer_duration, *resist_adjust, aa_id));
}
else
{
return(DoCastSpell(spell_id, target_id, slot, cast_time, mana_cost, oSpellWillFinish, item_slot, timer, timer_duration, spells[spell_id].ResistDiff, aa_id));
}
}
//
// the order of things here is intentional and important. make sure you
// understand the whole spell casting process and the flags that are passed
// around if you're gonna modify this
//
// this is the 2nd phase of CastSpell, broken up like this to make it easier
// to repeat a spell for bard songs
//
bool Mob::DoCastSpell(uint16 spell_id, uint16 target_id, CastingSlot slot,
int32 cast_time, int32 mana_cost, uint32* oSpellWillFinish,
uint32 item_slot, uint32 timer, uint32 timer_duration,
int16 resist_adjust, uint32 aa_id)
{
Mob* pMob = nullptr;
int32 orgcasttime;
if(!IsValidSpell(spell_id)) {
InterruptSpell();
return(false);
}
const SPDat_Spell_Struct &spell = spells[spell_id];
LogSpells("DoCastSpell called for spell [{}] ([{}]) on entity [{}], slot [{}], time [{}], mana [{}], from item [{}]",
spell.name, spell_id, target_id, static_cast<int>(slot), cast_time, mana_cost, item_slot==0xFFFFFFFF?999:item_slot);
casting_spell_id = spell_id;
casting_spell_slot = slot;
casting_spell_inventory_slot = item_slot;
if(casting_spell_timer != 0xFFFFFFFF)
{
casting_spell_timer = timer;
casting_spell_timer_duration = timer_duration;
}
casting_spell_aa_id = aa_id;
// check for fizzle
// note that CheckFizzle itself doesn't let NPCs fizzle,
// but this code allows for it.
if (slot < CastingSlot::MaxGems && !CheckFizzle(spell_id)) {
int fizzle_msg = IsBardSong(spell_id) ? MISS_NOTE : SPELL_FIZZLE;
uint32 use_mana = ((spells[spell_id].mana) / 4);
LogSpells("Spell casting canceled: fizzled. [{}] mana has been consumed", use_mana);
// fizzle 1/4 the mana away
Mob::SetMana(GetMana() - use_mana); // We send StopCasting which will update mana
StopCasting();
MessageString(Chat::SpellFailure, fizzle_msg);
/**
* Song Failure message
*/
entity_list.FilteredMessageCloseString(
this,
true,
RuleI(Range, SpellMessages),
Chat::SpellFailure,
(IsClient() ? FilterPCSpells : FilterNPCSpells),
(fizzle_msg == MISS_NOTE ? MISSED_NOTE_OTHER : SPELL_FIZZLE_OTHER),
/*
MessageFormat: You miss a note, bringing your song to a close! (if missed note)
MessageFormat: A missed note brings %1's song to a close!
MessageFormat: %1's spell fizzles!
*/
GetName()
);
TryTriggerOnCastRequirement();
return(false);
}
SaveSpellLoc();
LogSpells("Casting [{}] Started at ({},{},{})", spell_id, m_SpellLocation.x, m_SpellLocation.y, m_SpellLocation.z);
// if this spell doesn't require a target, or if it's an optional target
// and a target wasn't provided, then it's us; unless TGB is on and this
// is a TGB compatible spell.
if((IsGroupSpell(spell_id) ||
spell.targettype == ST_AEClientV1 ||
spell.targettype == ST_Self ||
spell.targettype == ST_AECaster ||
spell.targettype == ST_Ring ||
spell.targettype == ST_Beam) && target_id == 0)
{
LogSpells("Spell [{}] auto-targeted the caster. Group? [{}], target type [{}]", spell_id, IsGroupSpell(spell_id), spell.targettype);
target_id = GetID();
}
if(cast_time <= -1) {
// save the non-reduced cast time to use in the packet
cast_time = orgcasttime = spell.cast_time;
// if there's a cast time, check if they have a modifier for it
if(cast_time) {
cast_time = GetActSpellCasttime(spell_id, cast_time);
}
}
else
orgcasttime = cast_time;
// we checked for spells not requiring targets above
if(target_id == 0) {
LogSpells("Spell Error: no target. spell=[{}]", spell_id);
if(IsClient()) {
//clients produce messages... npcs should not for this case
MessageString(Chat::Red, SPELL_NEED_TAR);
InterruptSpell();
} else {
InterruptSpell(0, 0, 0); //the 0 args should cause no messages
}
return(false);
}
// ok now we know the target
casting_spell_targetid = target_id;
if (RuleB(Spells, InvisRequiresGroup) && IsInvisSpell(spell_id)) {
if (IsClient() && GetTarget() && GetTarget()->IsClient()) {
Client* spell_caster = this->CastToClient();
Client* spell_target = entity_list.GetClientByID(target_id);
if (spell_target && spell_target->GetID() != GetID()) {
bool cast_failed = true;
if (spell_target->IsGrouped()) {
Group *target_group = spell_target->GetGroup();
Group *my_group = GetGroup();
if (
target_group &&
my_group &&
(target_group->GetID() == my_group->GetID())
) {
cast_failed = false;
}
} else if (spell_target->IsRaidGrouped()) {
Raid *target_raid = spell_target->GetRaid();
Raid *my_raid = GetRaid();
if (
target_raid &&
my_raid &&
(target_raid->GetGroup(spell_target) == my_raid->GetGroup(spell_caster))
) {
cast_failed = false;
}
}
if (cast_failed) {
InterruptSpell(spell_id);
MessageString(Chat::Red, TARGET_GROUP_MEMBER);
return false;
}
}
}
}
// We don't get actual mana cost here, that's done when we consume the mana
if (mana_cost == -1)
mana_cost = spell.mana;
// mana is checked for clients on the frontend. we need to recheck it for NPCs though
// If you're at full mana, let it cast even if you dont have enough mana
// we calculated this above, now enforce it
if(mana_cost > 0 && slot != CastingSlot::Item)
{
int my_curmana = GetMana();
int my_maxmana = GetMaxMana();
if(my_curmana < mana_cost) // not enough mana
{
//this is a special case for NPCs with no mana...
if(IsNPC() && my_curmana == my_maxmana)
{
mana_cost = 0;
} else {
LogSpells("Spell Error not enough mana spell=[{}] mymana=[{}] cost=[{}]\n", spell_id, my_curmana, mana_cost);
if(IsClient()) {
//clients produce messages... npcs should not for this case
MessageString(Chat::Red, INSUFFICIENT_MANA);
InterruptSpell();
} else {
InterruptSpell(0, 0, 0); //the 0 args should cause no messages
}
return(false);
}
}
}
if(mana_cost > GetMana())
mana_cost = GetMana();
// we know our mana cost now
casting_spell_mana = mana_cost;
casting_spell_resist_adjust = resist_adjust;
LogSpells("Spell [{}]: Casting time [{}] (orig [{}]), mana cost [{}]",
spell_id, cast_time, orgcasttime, mana_cost);
// now tell the people in the area -- we ALWAYS want to send this, even instant cast spells.
// The only time this is skipped is for NPC innate procs and weapon procs. Procs from buffs
// oddly still send this. Since those cases don't reach here, we don't need to check them
if (slot != CastingSlot::Discipline)
SendBeginCast(spell_id, orgcasttime);
// cast time is 0, just finish it right now and be done with it
if(cast_time == 0) {
if (!DoCastingChecks()) {
StopCasting();
return false;
}
CastedSpellFinished(spell_id, target_id, slot, mana_cost, item_slot, resist_adjust);
return(true);
}
cast_time = mod_cast_time(cast_time);
// ok we know it has a cast time so we can start the timer now
spellend_timer.Start(cast_time);
if (IsAIControlled())
{
SetRunAnimSpeed(0);
pMob = entity_list.GetMob(target_id);
if (pMob && this != pMob)
FaceTarget(pMob);
}
// if we got here we didn't fizzle, and are starting our cast
if (oSpellWillFinish)
*oSpellWillFinish = Timer::GetCurrentTime() + cast_time + 100;
if (IsClient() && slot == CastingSlot::Item && item_slot != 0xFFFFFFFF) {
auto item = CastToClient()->GetInv().GetItem(item_slot);
if (item && item->GetItem())
MessageString(Chat::Spells, BEGINS_TO_GLOW, item->GetItem()->Name);
}
if (!DoCastingChecks()) {
StopCasting();
return false;
}
return(true);
}
void Mob::SendBeginCast(uint16 spell_id, uint32 casttime)
{
auto outapp = new EQApplicationPacket(OP_BeginCast, sizeof(BeginCast_Struct));
auto begincast = (BeginCast_Struct *)outapp->pBuffer;
begincast->caster_id = GetID();
begincast->spell_id = spell_id;
begincast->cast_time = casttime; // client calculates reduced time by itself
outapp->priority = 3;
entity_list.QueueCloseClients(
this, /* Sender */
outapp, /* Packet */
false, /* Ignore Sender */
RuleI(Range, BeginCast),
0, /* Skip this Mob */
true /* Packet ACK */
); //IsClient() ? FILTER_PCSPELLS : FILTER_NPCSPELLS);
safe_delete(outapp);
}
/*
* Some failures should be caught before the spell finishes casting
* This is especially helpful to clients when they cast really long things
* If this passes it sets casting_spell_checks to true which is checked in
* SpellProcess(), if a situation ever arises where a spell is delayed by these
* it's probably doing something wrong.
*/
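// Illustrative: thanks to these early checks, a levitate spell started in a
// no-levitate zone fails here at cast start (the callers StopCasting()
// immediately) instead of after the full cast time has elapsed.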
bool Mob::DoCastingChecks()
{
if (!IsClient() || CastToClient()->GetGM()) {
casting_spell_checks = true;
return true;
}
uint16 spell_id = casting_spell_id;
Mob *spell_target = entity_list.GetMob(casting_spell_targetid);
if (RuleB(Spells, BuffLevelRestrictions)) {
// casting_spell_targetid is guaranteed to be what we want; checking for ST_Self should work for now though
if (spell_target && spells[spell_id].targettype != ST_Self && !spell_target->CheckSpellLevelRestriction(spell_id)) {
LogSpells("Spell [{}] failed: recipient did not meet the level restrictions", spell_id);
if (!IsBardSong(spell_id))
MessageString(Chat::SpellFailure, SPELL_TOO_POWERFUL);
return false;
}
}
if (spells[spell_id].zonetype == 1 && !zone->CanCastOutdoor()) {
MessageString(Chat::Red, CAST_OUTDOORS);
return false;
}
if (IsEffectInSpell(spell_id, SE_Levitate) && !zone->CanLevitate()) {
Message(Chat::Red, "You can't levitate in this zone.");
return false;
}
if(zone->IsSpellBlocked(spell_id, glm::vec3(GetPosition()))) {
const char *msg = zone->GetSpellBlockedMessage(spell_id, glm::vec3(GetPosition()));
if (msg) {
Message(Chat::Red, msg);
return false;
} else {
Message(Chat::Red, "You can't cast this spell here.");
return false;
}
}
if (IsClient() && spells[spell_id].EndurTimerIndex > 0 && casting_spell_slot < CastingSlot::MaxGems)
if (!CastToClient()->IsLinkedSpellReuseTimerReady(spells[spell_id].EndurTimerIndex))
return false;
casting_spell_checks = true;
return true;
}
uint16 Mob::GetSpecializeSkillValue(uint16 spell_id) const {
switch(spells[spell_id].skill) {
case EQ::skills::SkillAbjuration:
return(GetSkill(EQ::skills::SkillSpecializeAbjure));
case EQ::skills::SkillAlteration:
return(GetSkill(EQ::skills::SkillSpecializeAlteration));
case EQ::skills::SkillConjuration:
return(GetSkill(EQ::skills::SkillSpecializeConjuration));
case EQ::skills::SkillDivination:
return(GetSkill(EQ::skills::SkillSpecializeDivination));
case EQ::skills::SkillEvocation:
return(GetSkill(EQ::skills::SkillSpecializeEvocation));
default:
//wtf...
break;
}
return(0);
}
void Client::CheckSpecializeIncrease(uint16 spell_id) {
// These are not active because CheckIncreaseSkill() already does so.
It's such a rare occurrence that adding them here is wasted. (ref only)
/*
if (IsDead() || IsUnconscious())
return;
if (IsAIControlled())
return;
*/
switch(spells[spell_id].skill) {
case EQ::skills::SkillAbjuration:
CheckIncreaseSkill(EQ::skills::SkillSpecializeAbjure, nullptr);
break;
case EQ::skills::SkillAlteration:
CheckIncreaseSkill(EQ::skills::SkillSpecializeAlteration, nullptr);
break;
case EQ::skills::SkillConjuration:
CheckIncreaseSkill(EQ::skills::SkillSpecializeConjuration, nullptr);
break;
case EQ::skills::SkillDivination:
CheckIncreaseSkill(EQ::skills::SkillSpecializeDivination, nullptr);
break;
case EQ::skills::SkillEvocation:
CheckIncreaseSkill(EQ::skills::SkillSpecializeEvocation, nullptr);
break;
default:
//wtf...
break;
}
}
void Client::CheckSongSkillIncrease(uint16 spell_id){
// These are not active because CheckIncreaseSkill() already does so.
It's such a rare occurrence that adding them here is wasted. (ref only)
/*
if (IsDead() || IsUnconscious())
return;
if (IsAIControlled())
return;
*/
switch(spells[spell_id].skill)
{
case EQ::skills::SkillSinging:
CheckIncreaseSkill(EQ::skills::SkillSinging, nullptr, -15);
break;
case EQ::skills::SkillPercussionInstruments:
if(this->itembonuses.percussionMod > 0) {
if (GetRawSkill(EQ::skills::SkillPercussionInstruments) > 0) // no skill increases if not trained in the instrument
CheckIncreaseSkill(EQ::skills::SkillPercussionInstruments, nullptr, -15);
else
MessageString(Chat::Red,NO_INSTRUMENT_SKILL); // tell the client that they need instrument training
}
else
CheckIncreaseSkill(EQ::skills::SkillSinging, nullptr, -15);
break;
case EQ::skills::SkillStringedInstruments:
if(this->itembonuses.stringedMod > 0) {
if (GetRawSkill(EQ::skills::SkillStringedInstruments) > 0)
CheckIncreaseSkill(EQ::skills::SkillStringedInstruments, nullptr, -15);
else
MessageString(Chat::Red,NO_INSTRUMENT_SKILL);
}
else
CheckIncreaseSkill(EQ::skills::SkillSinging, nullptr, -15);
break;
case EQ::skills::SkillWindInstruments:
if(this->itembonuses.windMod > 0) {
if (GetRawSkill(EQ::skills::SkillWindInstruments) > 0)
CheckIncreaseSkill(EQ::skills::SkillWindInstruments, nullptr, -15);
else
MessageString(Chat::Red,NO_INSTRUMENT_SKILL);
}
else
CheckIncreaseSkill(EQ::skills::SkillSinging, nullptr, -15);
break;
case EQ::skills::SkillBrassInstruments:
if(this->itembonuses.brassMod > 0) {
if (GetRawSkill(EQ::skills::SkillBrassInstruments) > 0)
CheckIncreaseSkill(EQ::skills::SkillBrassInstruments, nullptr, -15);
else
MessageString(Chat::Red,NO_INSTRUMENT_SKILL);
}
else
CheckIncreaseSkill(EQ::skills::SkillSinging, nullptr, -15);
break;
default:
break;
}
}
/*
returns true if spell is successful, false if it fizzled.
only works for clients; NPCs shouldn't be fizzling.
new algorithm that's closer to live EQ (I hope)
TODO: Add AA skills and item mods that reduce the chance to fizzle
*/
bool Mob::CheckFizzle(uint16 spell_id)
{
return(true);
}
bool Client::CheckFizzle(uint16 spell_id)
{
// GMs don't fizzle
if (GetGM()) return(true);
uint8 no_fizzle_level = 0;
//Live AA - Spell Casting Expertise, Mastery of the Past
no_fizzle_level = aabonuses.MasteryofPast + itembonuses.MasteryofPast + spellbonuses.MasteryofPast;
if (spells[spell_id].classes[GetClass()-1] < no_fizzle_level)
return true;
//is there any sort of focus that affects fizzling?
int par_skill;
int act_skill;
par_skill = spells[spell_id].classes[GetClass()-1] * 5 - 10;//IIRC even if you are lagging behind the skill levels you don't fizzle much
if (par_skill > 235)
par_skill = 235;
par_skill += spells[spell_id].classes[GetClass()-1]; // maximum of 300 for a level 65 spell (235 cap + 65)
act_skill = GetSkill(spells[spell_id].skill);
act_skill += GetLevel(); // maximum of whatever the client can cheat
act_skill += itembonuses.adjusted_casting_skill + spellbonuses.adjusted_casting_skill + aabonuses.adjusted_casting_skill;
LogSpells("Adjusted casting skill: [{}]+[{}]+[{}]+[{}]+[{}]=[{}]", GetSkill(spells[spell_id].skill), GetLevel(), itembonuses.adjusted_casting_skill, spellbonuses.adjusted_casting_skill, aabonuses.adjusted_casting_skill, act_skill);
//spell specialization
float specialize = GetSpecializeSkillValue(spell_id);
if(specialize > 0) {
switch(GetAA(aaSpellCastingMastery)){
case 1:
specialize = specialize * 1.05;
break;
case 2:
specialize = specialize * 1.15;
break;
case 3:
specialize = specialize * 1.3;
break;
}
if(((specialize/6.0f) + 15.0f) < zone->random.Real(0, 100)) {
specialize *= SPECIALIZE_FIZZLE / 200.0f;
} else {
specialize = 0.0f;
}
}
// == 0 --> on par
// > 0 --> skill is lower, higher chance of fizzle
// < 0 --> skill is better, lower chance of fizzle
// the max that diff can be is +- 235
float diff = par_skill + static_cast<float>(spells[spell_id].basediff) - act_skill;
// if you have high int/wis you fizzle less, you fizzle more if you are stupid
if(GetClass() == BARD)
{
diff -= (GetCHA() - 110) / 20.0;
}
else if (GetCasterClass() == 'W')
{
diff -= (GetWIS() - 125) / 20.0;
}
else if (GetCasterClass() == 'I')
{
diff -= (GetINT() - 125) / 20.0;
}
// base fizzle chance is 10%; we can make it lower for AA skills or whatever
float basefizzle = 10;
float fizzlechance = basefizzle - specialize + diff / 5.0;
// always at least 1% chance to fail or 5% to succeed
fizzlechance = fizzlechance < 1 ? 1 : (fizzlechance > 95 ? 95 : fizzlechance);
float fizzle_roll = zone->random.Real(0, 100);
LogSpells("Check Fizzle [{}] spell [{}] fizzlechance: [{}] diff: [{}] roll: [{}]", GetName(), spell_id, fizzlechance, diff, fizzle_roll);
if(fizzle_roll > fizzlechance)
return(true);
return(false);
}
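// Worked example (illustrative numbers only): a level-65 caster with 235 in
// the spell's skill, no specialization and no bonuses, casting a level-65
// spell with basediff 0:
//   par_skill    = min(65*5 - 10, 235) + 65 = 300
//   act_skill    = 235 + 65 = 300
//   diff         = 300 + 0 - 300 = 0
//   fizzlechance = 10 - 0 + 0/5 = 10, i.e. ~90% success per cast
// High WIS/INT (or CHA for bards) pushes diff negative and lowers the chance
// further; the result is always clamped to the 1..95 range above.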
void Mob::ZeroCastingVars()
{
// zero out the state keeping vars
attacked_count = 0;
spellend_timer.Disable();
casting_spell_id = 0;
casting_spell_targetid = 0;
casting_spell_slot = CastingSlot::Gem1;
casting_spell_mana = 0;
casting_spell_inventory_slot = 0;
casting_spell_timer = 0;
casting_spell_timer_duration = 0;
casting_spell_resist_adjust = 0;
casting_spell_checks = false;
casting_spell_aa_id = 0;
delaytimer = false;
}
void Mob::InterruptSpell(uint16 spellid)
{
if (spellid == SPELL_UNKNOWN)
spellid = casting_spell_id;
InterruptSpell(0, 0x121, spellid);
}
// color not used right now
void Mob::InterruptSpell(uint16 message, uint16 color, uint16 spellid)
{
EQApplicationPacket *outapp = nullptr;
uint16 message_other;
bool bard_song_mode = false; //has the bard song gone to auto repeat mode
if (spellid == SPELL_UNKNOWN) {
if(bardsong) {
spellid = bardsong;
bard_song_mode = true;
} else {
spellid = casting_spell_id;
}
}
if(casting_spell_id && IsNPC()) {
CastToNPC()->AI_Event_SpellCastFinished(false, static_cast<uint16>(casting_spell_slot));
}
if(casting_spell_aa_id && IsClient()) { //Reset AA timer on failed cast
CastToClient()->MessageString(Chat::SpellFailure, ABILITY_FAILED);
CastToClient()->ResetAlternateAdvancementTimer(casting_spell_aa_id);
}
ZeroCastingVars(); // resets all the state keeping stuff
LogSpells("Spell [{}] has been interrupted", spellid);
if(!spellid)
return;
if (bardsong || IsBardSong(casting_spell_id))
_StopSong();
if(bard_song_mode) {
return;
}
if(!message)
message = IsBardSong(spellid) ? SONG_ENDS_ABRUPTLY : INTERRUPT_SPELL;
// clients need some packets
if (IsClient() && message != SONG_ENDS)
{
// the interrupt message
outapp = new EQApplicationPacket(OP_InterruptCast, sizeof(InterruptCast_Struct));
InterruptCast_Struct* ic = (InterruptCast_Struct*) outapp->pBuffer;
ic->messageid = message;
ic->spawnid = GetID();
outapp->priority = 5;
CastToClient()->QueuePacket(outapp);
safe_delete(outapp);
SendSpellBarEnable(spellid);
}
// notify people in the area
// first figure out what message others should get
switch(message)
{
case SONG_ENDS:
message_other = SONG_ENDS_OTHER;
break;
case SONG_ENDS_ABRUPTLY:
message_other = SONG_ENDS_ABRUPTLY_OTHER;
break;
case MISS_NOTE:
message_other = MISSED_NOTE_OTHER;
break;
case SPELL_FIZZLE:
message_other = SPELL_FIZZLE_OTHER;
break;
default:
message_other = INTERRUPT_SPELL_OTHER;
}
// this is the actual message, it works the same as a formatted message
outapp = new EQApplicationPacket(OP_InterruptCast, sizeof(InterruptCast_Struct) + strlen(GetCleanName()) + 1);
InterruptCast_Struct* ic = (InterruptCast_Struct*) outapp->pBuffer;
ic->messageid = message_other;
ic->spawnid = GetID();
strcpy(ic->message, GetCleanName());
entity_list.QueueCloseClients(this, outapp, true, RuleI(Range, SongMessages), 0, true, IsClient() ? FilterPCSpells : FilterNPCSpells);
safe_delete(outapp);
}
// this is like interrupt, except it doesn't spam interrupt packets to everyone
// There are a few cases where this is what live does :P
void Mob::StopCasting()
{
if (casting_spell_id && IsNPC()) {
CastToNPC()->AI_Event_SpellCastFinished(false, static_cast<uint16>(casting_spell_slot));
}
if (IsClient()) {
auto c = CastToClient();
if (casting_spell_aa_id) { //Reset AA timer on failed cast
c->MessageString(Chat::SpellFailure, ABILITY_FAILED);
c->ResetAlternateAdvancementTimer(casting_spell_aa_id);
}
auto outapp = new EQApplicationPacket(OP_ManaChange, sizeof(ManaChange_Struct));
auto mc = (ManaChange_Struct *)outapp->pBuffer;
mc->new_mana = GetMana();
mc->stamina = GetEndurance();
mc->spell_id = casting_spell_id;
mc->keepcasting = 0;
c->FastQueuePacket(&outapp);
}
ZeroCastingVars();
}
// this is called after the timer is up and the spell is finished
// casting. everything goes through here, including items with zero cast time
// only to be used from SpellProcess
// NOTE: do not put range checking, etc into this function. this should
// just check timed spell specific things before passing off to SpellFinished
// which figures out proper targets etc
void Mob::CastedSpellFinished(uint16 spell_id, uint32 target_id, CastingSlot slot,
uint16 mana_used, uint32 inventory_slot, int16 resist_adjust)
{
bool IsFromItem = false;
EQ::ItemInstance *item = nullptr;
if(IsClient() && slot != CastingSlot::Item && slot != CastingSlot::PotionBelt && spells[spell_id].recast_time > 1000) { // 10 is item
if(!CastToClient()->GetPTimers().Expired(&database, pTimerSpellStart + spell_id, false)) {
//should we issue a message or send them a spell gem packet?
MessageString(Chat::Red, SPELL_RECAST);
LogSpells("Casting of [{}] canceled: spell reuse timer not expired", spell_id);
StopCasting();
return;
}
}
if(IsClient() && (slot == CastingSlot::Item || slot == CastingSlot::PotionBelt))
{
IsFromItem = true;
item = CastToClient()->GetInv().GetItem(inventory_slot);
if(item && item->GetItem()->RecastDelay > 0)
{
if(!CastToClient()->GetPTimers().Expired(&database, (pTimerItemStart + item->GetItem()->RecastType), false)) {
MessageString(Chat::Red, SPELL_RECAST);
LogSpells("Casting of [{}] canceled: item spell reuse timer not expired", spell_id);
StopCasting();
return;
}
}
}
if(!IsValidSpell(spell_id))
{
LogSpells("Casting of [{}] canceled: invalid spell id", spell_id);
InterruptSpell();
return;
}
// prevent rapid recast - this can happen if somehow the spell gems
// become desynced and the player casts again.
if(IsClient())
{
if(delaytimer)
{
LogSpells("Casting of [{}] canceled: recast too quickly", spell_id);
Message(Chat::Red, "You are unable to focus.");
InterruptSpell();
return;
}
}
// make sure they aren't somehow casting 2 timed spells at once
if (casting_spell_id != spell_id)
{
LogSpells("Casting of [{}] canceled: already casting", spell_id);
MessageString(Chat::Red,ALREADY_CASTING);
InterruptSpell();
return;
}
bool bard_song_mode = false;
bool regain_conc = false;
Mob *spell_target = entity_list.GetMob(target_id);
// here we do different things if this is a bard casting a bard song from
// a spell bar slot
if(GetClass() == BARD) // bards can move when casting any spell...
{
if (IsBardSong(spell_id)) {
if(spells[spell_id].buffduration == 0xFFFF) {
LogSpells("Bard song [{}] not applying bard logic because duration. dur=[{}], recast=[{}]", spells[spell_id].buffduration);
} else {
// So long recast bard songs need special bard logic, although the effects don't repulse like other songs
// This is basically a hack to get that effect
// You can hold down the long recast spells, but you only get the effects once
// Songs with mana cost also do not repulse
// AAs that use SE_TemporaryPets or SE_Familiar also do not repulse
// TODO fuck bards.
if (spells[spell_id].recast_time == 0 && spells[spell_id].mana == 0 && !IsEffectInSpell(spell_id, SE_TemporaryPets) && !IsEffectInSpell(spell_id, SE_Familiar)) {
bardsong = spell_id;
bardsong_slot = slot;
//NOTE: theres a lot more target types than this to think about...
if (spell_target == nullptr || (spells[spell_id].targettype != ST_Target && spells[spell_id].targettype != ST_AETarget))
bardsong_target_id = GetID();
else
bardsong_target_id = spell_target->GetID();
bardsong_timer.Start(6000);
}
LogSpells("Bard song [{}] started: slot [{}], target id [{}]", bardsong, (int) bardsong_slot, bardsong_target_id);
bard_song_mode = true;
}
}
}
else // not bard, check movement
{
// if has been attacked, or moved while casting
// check for regain concentration
if
(
attacked_count > 0 ||
GetX() != GetSpellX() ||
GetY() != GetSpellY()
)
{
// modify the chance based on how many times they were hit
// but cap it so it's not that large a factor
if(attacked_count > 15) attacked_count = 15;
float channelchance, distance_moved, d_x, d_y, distancemod;
if(IsClient())
{
float channelbonuses = 0.0f;
//AA that effect Spell channel chance are no longer on live. http://everquest.allakhazam.com/history/patches-2006-2.html
//No harm in maintaining the effects regardless, since we do check for channel chance.
if (IsFromItem)
channelbonuses += spellbonuses.ChannelChanceItems + itembonuses.ChannelChanceItems + aabonuses.ChannelChanceItems;
else
channelbonuses += spellbonuses.ChannelChanceSpells + itembonuses.ChannelChanceSpells + aabonuses.ChannelChanceSpells;
// max 93% chance at 252 skill
channelchance = 30 + GetSkill(EQ::skills::SkillChanneling) / 400.0f * 100;
channelchance -= attacked_count * 2;
channelchance += channelchance * channelbonuses / 100.0f;
}
#ifdef BOTS
else if(IsBot()) {
float channelbonuses = 0.0f;
if (IsFromItem)
channelbonuses += spellbonuses.ChannelChanceItems + itembonuses.ChannelChanceItems + aabonuses.ChannelChanceItems;
else
channelbonuses += spellbonuses.ChannelChanceSpells + itembonuses.ChannelChanceSpells + aabonuses.ChannelChanceSpells;
// max 93% chance at 252 skill
channelchance = 30 + GetSkill(EQ::skills::SkillChanneling) / 400.0f * 100;
channelchance -= attacked_count * 2;
channelchance += channelchance * channelbonuses / 100.0f;
}
#endif //BOTS
else {
// NPCs are just hard to interrupt, otherwise they get pwned
channelchance = 85;
channelchance -= attacked_count;
}
// as you get farther from your casting location,
// it gets squarely harder to regain concentration
if(GetX() != GetSpellX() || GetY() != GetSpellY())
{
d_x = std::abs(std::abs(GetX()) - std::abs(GetSpellX()));
d_y = std::abs(std::abs(GetY()) - std::abs(GetSpellY()));
if(d_x < 5 && d_y < 5)
{
//avoid the square root...
distance_moved = d_x * d_x + d_y * d_y;
// if you moved 1 unit, that's 25% off your chance to regain.
// if you moved 2, you lose 100% off your chance
distancemod = distance_moved * 25;
channelchance -= distancemod;
}
else
{
channelchance = 0;
}
}
LogSpells("Checking Interruption: spell x: [{}] spell y: [{}] cur x: [{}] cur y: [{}] channelchance [{}] channeling skill [{}]\n", GetSpellX(), GetSpellY(), GetX(), GetY(), channelchance, GetSkill(EQ::skills::SkillChanneling));
if(!spells[spell_id].uninterruptable && zone->random.Real(0, 100) > channelchance) {
LogSpells("Casting of [{}] canceled: interrupted", spell_id);
InterruptSpell();
return;
}
// if we got here, we regained concentration
regain_conc = true;
MessageString(Chat::Spells, REGAIN_AND_CONTINUE);
entity_list.MessageCloseString(
this,
true,
RuleI(Range, SpellMessages),
Chat::Spells,
OTHER_REGAIN_CAST,
this->GetCleanName());
}
}
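// Worked channeling example (illustrative numbers only, ignoring channel
// bonuses): a client with 200 Channeling skill starts at
// 30 + 200/400*100 = 80. Three hits while casting subtract 2 each (-> 74)
// and drifting one unit from the casting spot subtracts another 25 (-> 49),
// roughly a coin flip to regain concentration. Moving 2 or more units drops
// the chance to zero outright.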
// Check for consumables and Reagent focus items
// first check for component reduction
if(IsClient()) {
int reg_focus = CastToClient()->GetFocusEffect(focusReagentCost,spell_id);//Client only
/* it seems something causes some items not to consume reagents, it's not click type or temp flag
* it may be cast time being instant, which I had a hard time disproving, so let's go with that
* Items that might prove this wrong: Mystic Cloak (1057), Moss Mask (1400), and a bunch others
*/
if (item && item->GetItem() && item->GetItem()->CastTime == 0) {
LogSpells("Spell [{}]: Casted from instant clicky, prevent reagent consumption", spell_id);
} else if(zone->random.Roll(reg_focus)) {
LogSpells("Spell [{}]: Reagent focus item prevented reagent consumption ([{}] chance)", spell_id, reg_focus);
} else {
if(reg_focus > 0)
LogSpells("Spell [{}]: Reagent focus item failed to prevent reagent consumption ([{}] chance)", spell_id, reg_focus);
Client *c = this->CastToClient();
int component, component_count, inv_slot_id;
bool missingreags = false;
for(int t_count = 0; t_count < 4; t_count++) {
component = spells[spell_id].components[t_count];
component_count = spells[spell_id].component_counts[t_count];
if (component == -1)
continue;
// bard components are requirements for a certain instrument type, not a specific item
if(bard_song_mode) {
bool HasInstrument = true;
int InstComponent = spells[spell_id].NoexpendReagent[0];
switch (InstComponent) {
case -1:
continue; // no instrument required, go to next component
// percussion songs (13000 = hand drum)
case 13000:
if(itembonuses.percussionMod == 0) { // check for the appropriate instrument type
HasInstrument = false;
c->MessageString(Chat::Red, SONG_NEEDS_DRUM); // send an error message if missing
}
break;
// wind songs (13001 = wooden flute)
case 13001:
if(itembonuses.windMod == 0) {
HasInstrument = false;
c->MessageString(Chat::Red, SONG_NEEDS_WIND);
}
break;
// string songs (13011 = lute)
case 13011:
if(itembonuses.stringedMod == 0) {
HasInstrument = false;
c->MessageString(Chat::Red, SONG_NEEDS_STRINGS);
}
break;
// brass songs (13012 = horn)
case 13012:
if(itembonuses.brassMod == 0) {
HasInstrument = false;
c->MessageString(Chat::Red, SONG_NEEDS_BRASS);
}
break;
default: // some non-instrument component. Let it go, but record it in the log
LogSpells("Something odd happened: Song [{}] required component [{}]", spell_id, component);
}
if(!HasInstrument) { // if the instrument is missing, log it and interrupt the song
LogSpells("Song [{}]: Canceled. Missing required instrument [{}]", spell_id, component);
if(c->GetGM())
c->Message(Chat::White, "Your GM status allows you to finish casting even though you're missing a required instrument.");
else {
InterruptSpell();
return;
}
}
} // end bard component section
// handle the components for traditional casters
else {
if (!RuleB(Character, PetsUseReagents) && (IsEffectInSpell(spell_id, SE_SummonPet) || IsEffectInSpell(spell_id, SE_NecPet))) {
//bypass reagent cost
}
else if(c->GetInv().HasItem(component, component_count, invWhereWorn|invWherePersonal) == -1) // item not found
{
if (!missingreags)
{
c->MessageString(Chat::Red, MISSING_SPELL_COMP);
missingreags=true;
}
const EQ::ItemData *item = database.GetItem(component);
if(item) {
c->MessageString(Chat::Red, MISSING_SPELL_COMP_ITEM, item->Name);
LogSpells("Spell [{}]: Canceled. Missing required reagent [{}] ([{}])", spell_id, item->Name, component);
}
else {
char TempItemName[64];
strcpy(TempItemName, "UNKNOWN");
LogSpells("Spell [{}]: Canceled. Missing required reagent [{}] ([{}])", spell_id, TempItemName, component);
}
}
} // end bard/not bard ifs
} // end reagent loop
if (missingreags) {
if(c->GetGM())
c->Message(Chat::White, "Your GM status allows you to finish casting even though you're missing required components.");
else {
InterruptSpell();
return;
}
}
else if (!RuleB(Character, PetsUseReagents) && (IsEffectInSpell(spell_id, SE_SummonPet) || IsEffectInSpell(spell_id, SE_NecPet))) {
//bypass reagent cost
}
else if (!bard_song_mode)
{
int noexpend;
for(int t_count = 0; t_count < 4; t_count++) {
component = spells[spell_id].components[t_count];
noexpend = spells[spell_id].NoexpendReagent[t_count];
if (component == -1 || noexpend == component)
continue;
component_count = spells[spell_id].component_counts[t_count];
LogSpells("Spell [{}]: Consuming [{}] of spell component item id [{}]", spell_id, component_count, component);
// Components found, Deleting
// now we go looking for and deleting the items one by one
for(int s = 0; s < component_count; s++)
{
inv_slot_id = c->GetInv().HasItem(component, 1, invWhereWorn|invWherePersonal);
if(inv_slot_id != -1)
{
c->DeleteItemInInventory(inv_slot_id, 1, true);
}
else
{ // some kind of error in the code if this happens
c->Message(Chat::Red, "ERROR: reagent item disappeared while processing?");
}
}
}
} // end missingreags/consumption
} // end `focus did not help us`
} // end IsClient() for reagents
// this is common to both bard and non bard
// if this was cast from an inventory slot, check out the item that's there
int16 DeleteChargeFromSlot = -1;
if(IsClient() && (slot == CastingSlot::Item || slot == CastingSlot::PotionBelt)
&& inventory_slot != 0xFFFFFFFF) // 10 is an item
{
bool fromaug = false;
EQ::ItemData* augitem = nullptr;
uint32 recastdelay = 0;
int recasttype = 0;
while (true) { // single-pass breakable block: scan the augments once, then break
if (item == nullptr)
break;
for (int r = EQ::invaug::SOCKET_BEGIN; r <= EQ::invaug::SOCKET_END; r++) {
const EQ::ItemInstance* aug_i = item->GetAugment(r);
if (!aug_i)
continue;
const EQ::ItemData* aug = aug_i->GetItem();
if (!aug)
continue;
if (aug->Click.Effect == spell_id)
{
recastdelay = aug_i->GetItem()->RecastDelay;
recasttype = aug_i->GetItem()->RecastType;
fromaug = true;
break;
}
}
break;
}
//Test the aug recast delay
if(IsClient() && fromaug && recastdelay > 0)
{
if(!CastToClient()->GetPTimers().Expired(&database, (pTimerItemStart + recasttype), false)) {
MessageString(Chat::Red, SPELL_RECAST);
LogSpells("Casting of [{}] canceled: item spell reuse timer not expired", spell_id);
StopCasting();
return;
}
else
{
//Can we start the timer here? I don't see why not.
CastToClient()->GetPTimers().Start((pTimerItemStart + recasttype), recastdelay);
if (recasttype != -1) {
database.UpdateItemRecastTimestamps(
CastToClient()->CharacterID(),
recasttype,
CastToClient()->GetPTimers().Get(pTimerItemStart + recasttype)->GetReadyTimestamp()
);
}
}
}
if ((item && item->IsClassCommon() && (item->GetItem()->Click.Effect == spell_id) && item->GetCharges()) || fromaug)
{
//const ItemData* item = item->GetItem();
int16 charges = item->GetItem()->MaxCharges;
if(fromaug) { charges = -1; } //Don't destroy the parent item
if(charges > -1) { // charged item, expend a charge
LogSpells("Spell [{}]: Consuming a charge from item [{}] ([{}]) which had [{}]/[{}] charges", spell_id, item->GetItem()->Name, item->GetItem()->ID, item->GetCharges(), item->GetItem()->MaxCharges);
DeleteChargeFromSlot = inventory_slot;
} else {
LogSpells("Spell [{}]: Cast from unlimited charge item [{}] ([{}]) ([{}] charges)", spell_id, item->GetItem()->Name, item->GetItem()->ID, item->GetItem()->MaxCharges);
}
}
else
{
LogSpells("Item used to cast spell [{}] was missing from inventory slot [{}] after casting!", spell_id, inventory_slot);
Message(Chat::Red, "Casting Error: Active casting item not found in inventory slot %i", inventory_slot);
InterruptSpell();
return;
}
}
// we're done casting, now try to apply the spell
if( !SpellFinished(spell_id, spell_target, slot, mana_used, inventory_slot, resist_adjust) )
{
LogSpells("Casting of [{}] canceled: SpellFinished returned false", spell_id);
// most of the cases we return false have a message already or are logic errors that shouldn't happen
// if there are issues I guess we can do something else, but this should work
StopCasting();
return;
}
if(IsClient()) {
CheckNumHitsRemaining(NumHit::MatchingSpells);
TrySympatheticProc(target, spell_id);
}
TryOnSpellFinished(this, target, spell_id); //Use for effects that should be checked after SpellFinished is completed.
TryTwincast(this, target, spell_id);
TryTriggerOnCastFocusEffect(focusTriggerOnCast, spell_id);
if(DeleteChargeFromSlot >= 0)
CastToClient()->DeleteItemInInventory(DeleteChargeFromSlot, 1, true);
//
// at this point the spell has successfully been cast
//
std::string export_string = fmt::format("{}", spell_id);
if(IsClient()) {
parse->EventPlayer(EVENT_CAST, CastToClient(), export_string, 0);
} else if(IsNPC()) {
parse->EventNPC(EVENT_CAST, CastToNPC(), nullptr, export_string, 0);
}
if(bard_song_mode)
{
if(IsClient())
{
Client *c = CastToClient();
if((IsFromItem && RuleB(Character, SkillUpFromItems)) || !IsFromItem) {
c->CheckSongSkillIncrease(spell_id);
}
if (spells[spell_id].EndurTimerIndex > 0 && slot < CastingSlot::MaxGems)
c->SetLinkedSpellReuseTimer(spells[spell_id].EndurTimerIndex, spells[spell_id].recast_time / 1000);
c->MemorizeSpell(static_cast<uint32>(slot), spell_id, memSpellSpellbar);
}
LogSpells("Bard song [{}] should be started", spell_id);
}
else
{
if(IsClient())
{
Client *c = CastToClient();
SendSpellBarEnable(spell_id);
// this causes the delayed refresh of the spell bar gems
if (spells[spell_id].EndurTimerIndex > 0 && slot < CastingSlot::MaxGems)
c->SetLinkedSpellReuseTimer(spells[spell_id].EndurTimerIndex, spells[spell_id].recast_time / 1000);
c->MemorizeSpell(static_cast<uint32>(slot), spell_id, memSpellSpellbar);
// this tells the client that casting may happen again
SetMana(GetMana());
// skills
if (EQ::skills::IsCastingSkill(spells[spell_id].skill) && ((IsFromItem && RuleB(Character, SkillUpFromItems)) || !IsFromItem)) {
c->CheckIncreaseSkill(spells[spell_id].skill, nullptr);
// increased chance of gaining channel skill if you regained concentration
c->CheckIncreaseSkill(EQ::skills::SkillChanneling, nullptr, regain_conc ? 5 : 0);
c->CheckSpecializeIncrease(spell_id);
}
}
}
// there should be no casting going on now
ZeroCastingVars();
// set the rapid recast timer for next time around
// Why do we have this? It mostly just causes issues when things are working correctly
// It also needs to be < the user's ping to not cause issues
delaytimer = true;
spellend_timer.Start(10, true);
LogSpells("Spell casting of [{}] is finished", spell_id);
}
bool Mob::DetermineSpellTargets(uint16 spell_id, Mob *&spell_target, Mob *&ae_center, CastAction_type &CastAction, CastingSlot slot, bool isproc)
{
/*
The basic types of spells:
Single target - some might be undead only, self only, etc, but these
all affect the target of the caster.
AE around caster - these affect entities close to the caster, and have
no target.
AE around target - these have a target, and affect the target as well as
entities close to the target.
AE on location - this is a tricky one that is cast on a mob target but
has a special AE duration that keeps it recasting every 2.5 sec on the
same location. These work the same as AE around target spells, except
the target is a special beacon that's created when the spell is cast
Group - the caster is always affected, but there's more
targetgroupbuffs on - these affect the target and the target's group.
targetgroupbuffs off - no target, affects the caster's group.
Group Teleport - the caster plus his group are affected. these cannot
be targeted.
I think the string ID SPELL_NEED_TAR is wrong, it doesn't seem to show up.
*/
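// Illustrative summary of the switch below (not exhaustive): ST_Self forces
// spell_target to the caster; ST_Target and the body-type variants validate
// the current target; ST_AECaster centers an AE on the caster while
// ST_AETarget centers it on the target; CastAction tells the caller which
// execution path SpellFinished() should take.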
// during this switch, this variable gets set to one of these things
// and that causes the spell to be executed differently
bodyType target_bt = BT_Humanoid;
SpellTargetType targetType = spells[spell_id].targettype;
bodyType mob_body = spell_target ? spell_target->GetBodyType() : BT_Humanoid;
if(IsPlayerIllusionSpell(spell_id)
&& spell_target != nullptr // null ptr crash safeguard
&& !spell_target->IsNPC() // still self only if an NPC is targeted
&& IsClient()
&& (IsGrouped() // still self only if not grouped
|| IsRaidGrouped())
&& (HasProjectIllusion())){
LogAA("Project Illusion overwrote target caster: [{}] spell id: [{}] was ON", GetName(), spell_id);
targetType = ST_GroupClientAndPet;
}
// NPC innate procs override the target type to single target.
// Yes. This code will cause issues if they have the proc as innate AND on a weapon. Oh well.
if (isproc && IsNPC() && CastToNPC()->GetInnateProcSpellID() == spell_id)
targetType = ST_Target;
if (spell_target && spells[spell_id].CastRestriction && !spell_target->PassCastRestriction(spells[spell_id].CastRestriction)){
Message(Chat::Red, "Your target does not meet the spell requirements."); //Current live also adds description after this from dbstr_us type 39
return false;
}
if (spells[spell_id].caster_requirement_id && !PassCastRestriction(spells[spell_id].caster_requirement_id)) {
MessageString(Chat::Red, SPELL_WOULDNT_HOLD);
return false;
}
//Must be out of combat. (Beneficial checks the caster's combat state, Detrimental checks the target's)
if (!spells[spell_id].InCombat && spells[spell_id].OutofCombat) {
if (IsDetrimentalSpell(spell_id)) {
if (spell_target &&
((spell_target->IsNPC() && spell_target->IsEngaged()) ||
(spell_target->IsClient() && spell_target->CastToClient()->GetAggroCount()))) {
MessageString(Chat::Red, SPELL_NO_EFFECT); // Unsure correct string
return false;
}
}
else if (IsBeneficialSpell(spell_id)) {
if ((IsNPC() && IsEngaged()) || (IsClient() && CastToClient()->GetAggroCount())) {
if (IsDiscipline(spell_id))
MessageString(Chat::Red, NO_ABILITY_IN_COMBAT);
else
MessageString(Chat::Red, NO_CAST_IN_COMBAT);
return false;
}
}
}
// Must be in combat. (Beneficial checks the caster's combat state, Detrimental checks the target's)
else if (spells[spell_id].InCombat && !spells[spell_id].OutofCombat) {
if (IsDetrimentalSpell(spell_id)) {
if (spell_target &&
((spell_target->IsNPC() && !spell_target->IsEngaged()) ||
(spell_target->IsClient() && !spell_target->CastToClient()->GetAggroCount()))) {
MessageString(Chat::Red, SPELL_NO_EFFECT); // Unsure correct string
return false;
}
}
else if (IsBeneficialSpell(spell_id)) {
if ((IsNPC() && !IsEngaged()) || (IsClient() && !CastToClient()->GetAggroCount())) {
if (IsDiscipline(spell_id))
MessageString(Chat::Red, NO_ABILITY_OUT_OF_COMBAT);
else
MessageString(Chat::Red, NO_CAST_OUT_OF_COMBAT);
return false;
}
}
}
switch (targetType)
{
// single target spells
case ST_Self:
{
spell_target = this;
CastAction = SingleTarget;
break;
}
case ST_TargetOptional:
{
if (!spell_target)
{
LogSpells("Spell [{}] canceled: invalid target (normal)", spell_id);
MessageString(Chat::Red, SPELL_NEED_TAR);
return false; // can't cast these unless we have a target
}
CastAction = SingleTarget;
break;
}
// target required for these
case ST_Undead: {
if(!spell_target || (
mob_body != BT_SummonedUndead
&& mob_body != BT_Undead
&& mob_body != BT_Vampire
)
)
{
//invalid target
LogSpells("Spell [{}] canceled: invalid target of body type [{}] (undead)", spell_id, mob_body);
if(!spell_target)
MessageString(Chat::Red,SPELL_NEED_TAR);
else
MessageString(Chat::Red,CANNOT_AFFECT_NPC);
return false;
}
CastAction = SingleTarget;
break;
}
case ST_Summoned: {
if(!spell_target || (mob_body != BT_Summoned && mob_body != BT_Summoned2 && mob_body != BT_Summoned3))
{
//invalid target
LogSpells("Spell [{}] canceled: invalid target of body type [{}] (summoned)", spell_id, mob_body);
MessageString(Chat::Red,SPELL_NEED_TAR);
return false;
}
CastAction = SingleTarget;
break;
}
case ST_SummonedPet:
{
if(!spell_target || (spell_target != GetPet()) ||
(mob_body != BT_Summoned && mob_body != BT_Summoned2 && mob_body != BT_Summoned3 && mob_body != BT_Animal))
{
LogSpells("Spell [{}] canceled: invalid target of body type [{}] (summoned pet)",
spell_id, mob_body);
MessageString(Chat::Red, SPELL_NEED_TAR);
return false;
}
CastAction = SingleTarget;
break;
}
//single body type target spells...
//this is a little hackish, but better than duplicating code IMO
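	// NOTE: deliberate case fall-through below; target_bt starts as BT_Humanoid, so only the first matching case claims it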
case ST_Plant: if(target_bt == BT_Humanoid) target_bt = BT_Plant;
case ST_Dragon: if(target_bt == BT_Humanoid) target_bt = BT_Dragon;
case ST_Giant: if(target_bt == BT_Humanoid) target_bt = BT_Giant;
case ST_Animal: if(target_bt == BT_Humanoid) target_bt = BT_Animal;
// check for special case body types (Velious dragons/giants)
if(mob_body == BT_RaidGiant) mob_body = BT_Giant;
if(mob_body == BT_VeliousDragon) mob_body = BT_Dragon;
{
if(!spell_target || mob_body != target_bt)
{
//invalid target
LogSpells("Spell [{}] canceled: invalid target of body type [{}] (want body Type [{}])", spell_id, mob_body, target_bt);
if(!spell_target)
MessageString(Chat::Red,SPELL_NEED_TAR);
else
MessageString(Chat::Red,CANNOT_AFFECT_NPC);
return false;
}
CastAction = SingleTarget;
break;
}
case ST_Tap:
case ST_LDoNChest_Cursed:
case ST_Target: {
if(IsLDoNObjectSpell(spell_id))
{
if(!spell_target)
{
LogSpells("Spell [{}] canceled: invalid target (ldon object)", spell_id);
MessageString(Chat::Red,SPELL_NEED_TAR);
return false;
}
else
{
if(!spell_target->IsNPC())
{
LogSpells("Spell [{}] canceled: invalid target (normal)", spell_id);
MessageString(Chat::Red,SPELL_NEED_TAR);
return false;
}
if(spell_target->GetClass() != LDON_TREASURE)
{
LogSpells("Spell [{}] canceled: invalid target (normal)", spell_id);
MessageString(Chat::Red,SPELL_NEED_TAR);
return false;
}
}
}
if(!spell_target)
{
LogSpells("Spell [{}] canceled: invalid target (normal)", spell_id);
MessageString(Chat::Red,SPELL_NEED_TAR);
return false; // can't cast these unless we have a target
}
CastAction = SingleTarget;
break;
}
case ST_Corpse:
{
if(!spell_target || !spell_target->IsPlayerCorpse())
{
LogSpells("Spell [{}] canceled: invalid target (corpse)", spell_id);
uint32 message = ONLY_ON_CORPSES;
if(!spell_target) message = SPELL_NEED_TAR;
else if(!spell_target->IsCorpse()) message = ONLY_ON_CORPSES;
else if(!spell_target->IsPlayerCorpse()) message = CORPSE_NOT_VALID;
MessageString(Chat::Red, message);
return false;
}
CastAction = SingleTarget;
break;
}
case ST_Pet:
{
spell_target = GetPet();
if(!spell_target)
{
LogSpells("Spell [{}] canceled: invalid target (no pet)", spell_id);
MessageString(Chat::Red,NO_PET);
return false; // can't cast these unless we have a target
}
CastAction = SingleTarget;
break;
}
case ST_AEBard:
case ST_AECaster:
case ST_AEClientV1:
{
spell_target = nullptr;
ae_center = this;
CastAction = AECaster;
break;
}
case ST_HateList:
{
spell_target = nullptr;
ae_center = this;
CastAction = CAHateList;
break;
}
case ST_AETargetHateList:
{
if (spells[spell_id].range > 0)
{
if(!spell_target)
return false;
ae_center = spell_target;
CastAction = AETarget;
}
else {
spell_target = nullptr;
ae_center = this;
CastAction = CAHateList;
}
break;
}
case ST_AreaClientOnly:
case ST_AreaNPCOnly:
{
if (spells[spell_id].range > 0)
{
if(!spell_target)
return false;
ae_center = spell_target;
CastAction = AETarget;
}
else {
spell_target = nullptr;
ae_center = this;
CastAction = AECaster;
}
break;
}
case ST_UndeadAE: //should only affect undead...
case ST_SummonedAE:
case ST_TargetAETap:
case ST_AETarget:
case ST_TargetAENoPlayersPets:
{
if(!spell_target)
{
LogSpells("Spell [{}] canceled: invalid target (AOE)", spell_id);
MessageString(Chat::Red,SPELL_NEED_TAR);
return false;
}
ae_center = spell_target;
CastAction = AETarget;
break;
}
// Group spells
case ST_GroupTeleport:
case ST_Group:
case ST_GroupNoPets:
{
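		// TGB (/tgb, Target Group Buff) lets a client aim a group buff at another group via a targeted member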
if(IsClient() && CastToClient()->TGB() && IsTGBCompatibleSpell(spell_id) && (slot != CastingSlot::Item || RuleB(Spells, AllowItemTGB))) {
if( (!target) ||
(target->IsNPC() && !(target->GetOwner() && target->GetOwner()->IsClient())) ||
(target->IsCorpse()) )
spell_target = this;
else
spell_target = target;
} else {
spell_target = this;
}
if (spell_target && spell_target->IsPet() && spells[spell_id].targettype == ST_GroupNoPets){
MessageString(Chat::Red,NO_CAST_ON_PET);
return false;
}
CastAction = GroupSpell;
break;
}
case ST_GroupClientAndPet:
{
if(!spell_target)
{
LogSpells("Spell [{}] canceled: invalid target (Group Required: Single Target)", spell_id);
MessageString(Chat::Red,SPELL_NEED_TAR);
return false;
}
if(spell_target != this)
{
if(spell_target == GetPet())
{
CastAction = SingleTarget;
}
else if(spell_target == GetOwner())
{
CastAction = SingleTarget;
}
else
{
uint32 group_id_caster = 0;
uint32 group_id_target = 0;
if(IsClient())
{
if(IsGrouped())
{
if (Group* group = GetGroup()) {
group_id_caster = group->GetID();
}
}
else if(IsRaidGrouped())
{
if (Raid* raid = GetRaid()) {
uint32 group_id = raid->GetGroup(CastToClient());
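						// GetGroup() returns 0xFFFFFFFF when the member has no raid group; shift valid ids up by one so 0 can mean "ungrouped"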
group_id_caster = (group_id == 0xFFFFFFFF) ? 0 : (group_id + 1);
}
}
}
else if(IsPet())
{
Mob *owner = GetOwner();
if(owner->IsGrouped())
{
if (Group* group = owner->GetGroup()) {
group_id_caster = group->GetID();
}
}
else if(owner->IsRaidGrouped())
{
if (Raid* raid = owner->GetRaid()) {
uint32 group_id = raid->GetGroup(owner->CastToClient());
group_id_caster = (group_id == 0xFFFFFFFF) ? 0 : (group_id + 1);
}
}
}
#ifdef BOTS
else if(IsBot())
{
if(IsGrouped())
{
group_id_caster = GetGroup()->GetID();
}
else if(IsRaidGrouped())
{
			if(GetOwner())
				group_id_caster = (GetRaid()->GetGroup(GetOwner()->CastToClient()) == 0xFFFFFFFF) ? 0 : (GetRaid()->GetGroup(GetOwner()->CastToClient()) + 1);
}
}
#endif //BOTS
if(spell_target->IsClient())
{
if(spell_target->IsGrouped())
{
if (Group* group = spell_target->GetGroup()) {
group_id_target = group->GetID();
}
}
else if(spell_target->IsRaidGrouped())
{
if (Raid* raid = spell_target->GetRaid()) {
uint32 group_id = raid->GetGroup(spell_target->CastToClient());
group_id_target = (group_id == 0xFFFFFFFF) ? 0 : (group_id + 1);
}
}
}
else if(spell_target->IsPet())
{
Mob *owner = spell_target->GetOwner();
if(owner->IsGrouped())
{
if (Group* group = owner->GetGroup()) {
group_id_target = group->GetID();
}
}
else if(owner->IsRaidGrouped())
{
if (Raid* raid = owner->GetRaid()) {
uint32 group_id = raid->GetGroup(owner->CastToClient());
group_id_target = (group_id == 0xFFFFFFFF) ? 0 : (group_id + 1);
}
}
}
#ifdef BOTS
else if(spell_target->IsBot())
{
if(spell_target->IsGrouped())
{
group_id_target = spell_target->GetGroup()->GetID();
}
else if(spell_target->IsRaidGrouped())
{
			if(spell_target->GetOwner())
				group_id_target = (spell_target->GetRaid()->GetGroup(spell_target->GetOwner()->CastToClient()) == 0xFFFFFFFF) ? 0 : (spell_target->GetRaid()->GetGroup(spell_target->GetOwner()->CastToClient()) + 1);
}
}
#endif //BOTS
if(group_id_caster == 0 || group_id_target == 0)
{
LogSpells("Spell [{}] canceled: Attempted to cast a Single Target Group spell on a ungrouped member", spell_id);
MessageString(Chat::Red, TARGET_GROUP_MEMBER);
return false;
}
if(group_id_caster != group_id_target)
{
LogSpells("Spell [{}] canceled: Attempted to cast a Single Target Group spell on a ungrouped member", spell_id);
MessageString(Chat::Red, TARGET_GROUP_MEMBER);
return false;
}
CastAction = SingleTarget;
}
}
else
{
CastAction = SingleTarget;
}
break;
}
case ST_Directional:
CastAction = DirectionalAE;
spell_target = nullptr;
ae_center = nullptr;
break;
case ST_TargetsTarget:
{
Mob *spell_target_tot = spell_target ? spell_target->GetTarget() : nullptr;
if(!spell_target_tot)
return false;
		//Verified from live - Target's Target needs to be in combat range to receive the effect
if (!this->CombatRange(spell_target))
return false;
spell_target = spell_target_tot;
CastAction = SingleTarget;
break;
}
case ST_PetMaster:
{
Mob *owner = nullptr;
if (IsPet())
owner = GetOwner();
else if ((IsNPC() && CastToNPC()->GetSwarmOwner()))
owner = entity_list.GetMobID(CastToNPC()->GetSwarmOwner());
if (!owner)
return false;
spell_target = owner;
CastAction = SingleTarget;
break;
}
case ST_Beam:
{
CastAction = Beam;
spell_target = nullptr;
ae_center = nullptr;
break;
}
case ST_Ring:
{
CastAction = TargetRing;
spell_target = nullptr;
ae_center = nullptr;
break;
}
default:
{
LogSpells("I dont know Target Type: [{}] Spell: ([{}]) [{}]", spells[spell_id].targettype, spell_id, spells[spell_id].name);
Message(0, "I dont know Target Type: %d Spell: (%d) %s", spells[spell_id].targettype, spell_id, spells[spell_id].name);
CastAction = CastActUnknown;
break;
}
}
return(true);
}
// only used from CastedSpellFinished, and procs
// we can't interrupt in this, or anything called from this!
// if you need to abort the casting, return false
bool Mob::SpellFinished(uint16 spell_id, Mob *spell_target, CastingSlot slot, uint16 mana_used,
uint32 inventory_slot, int16 resist_adjust, bool isproc, int level_override)
{
//EQApplicationPacket *outapp = nullptr;
Mob *ae_center = nullptr;
if(!IsValidSpell(spell_id))
return false;
//Death Touch targets the pet owner instead of the pet when said pet is tanking.
	if (RuleB(Spells, CazicTouchTargetsPetOwner) && spell_target && spell_target->HasOwner() && (spell_id == SPELL_CAZIC_TOUCH || spell_id == SPELL_TOUCH_OF_VINITRAS)) {
Mob* owner = spell_target->GetOwner();
if (owner) {
spell_target = owner;
}
}
//Guard Assist Code
if (RuleB(Character, PVPEnableGuardFactionAssist) && spell_target && IsDetrimentalSpell(spell_id) && spell_target != this) {
		if ((IsClient() && spell_target->IsClient()) || (HasOwner() && GetOwner()->IsClient() && spell_target->IsClient())) {
auto& mob_list = entity_list.GetCloseMobList(spell_target);
for (auto& e : mob_list) {
auto mob = e.second;
if (mob->IsNPC() && mob->CastToNPC()->IsGuard()) {
float distance = Distance(spell_target->GetPosition(), mob->GetPosition());
if ((mob->CheckLosFN(spell_target) || mob->CheckLosFN(this)) && distance <= 70) {
auto petorowner = GetOwnerOrSelf();
if (spell_target->GetReverseFactionCon(mob) <= petorowner->GetReverseFactionCon(mob)) {
mob->AddToHateList(this);
}
}
}
}
}
}
if( spells[spell_id].zonetype == 1 && !zone->CanCastOutdoor()){
if(IsClient()){
if(!CastToClient()->GetGM()){
MessageString(Chat::Red, CAST_OUTDOORS);
return false;
}
}
}
if(IsEffectInSpell(spell_id, SE_Levitate) && !zone->CanLevitate()){
if(IsClient()){
if(!CastToClient()->GetGM()){
Message(Chat::Red, "You can't levitate in this zone.");
return false;
}
}
}
if(IsClient() && !CastToClient()->GetGM()){
if(zone->IsSpellBlocked(spell_id, glm::vec3(GetPosition()))){
const char *msg = zone->GetSpellBlockedMessage(spell_id, glm::vec3(GetPosition()));
if(msg){
Message(Chat::Red, msg);
return false;
}
else{
Message(Chat::Red, "You can't cast this spell here.");
return false;
}
}
}
if (IsClient() && CastToClient()->GetGM()){
if (zone->IsSpellBlocked(spell_id, glm::vec3(GetPosition()))){
LogSpells("GM Cast Blocked Spell: [{}] (ID [{}])", GetSpellName(spell_id), spell_id);
}
}
if
(
this->IsClient() &&
(zone->GetZoneID() == 183 || zone->GetZoneID() == 184) && // load
CastToClient()->Admin() < 80
)
{
if
(
IsEffectInSpell(spell_id, SE_Gate) ||
IsEffectInSpell(spell_id, SE_Translocate) ||
IsEffectInSpell(spell_id, SE_Teleport)
)
{
Message(0, "The Gods brought you here, only they can send you away.");
return false;
}
}
//determine the type of spell target we have
CastAction_type CastAction;
if(!DetermineSpellTargets(spell_id, spell_target, ae_center, CastAction, slot, isproc))
return(false);
LogSpells("Spell [{}]: target type [{}], target [{}], AE center [{}]", spell_id, CastAction, spell_target?spell_target->GetName():"NONE", ae_center?ae_center->GetName():"NONE");
// if a spell has the AEDuration flag, it becomes an AE on target
// spell that's recast every 2500 msec for AEDuration msec. There are
// spells of all kinds of target types that do this, strangely enough
// TODO: finish this
if(IsAEDurationSpell(spell_id)) {
// the spells are AE target, but we aim them on a beacon
Mob *beacon_loc = spell_target ? spell_target : this;
auto beacon = new Beacon(beacon_loc, spells[spell_id].AEDuration);
entity_list.AddBeacon(beacon);
LogSpells("Spell [{}]: AE duration beacon created, entity id [{}]", spell_id, beacon->GetName());
spell_target = nullptr;
ae_center = beacon;
CastAction = AECaster;
}
// check line of sight to target if it's a detrimental spell
if(!spells[spell_id].npc_no_los && spell_target && IsDetrimentalSpell(spell_id) && !CheckLosFN(spell_target) && !IsHarmonySpell(spell_id) && spells[spell_id].targettype != ST_TargetOptional)
{
LogSpells("Spell [{}]: cannot see target [{}]", spell_id, spell_target->GetName());
MessageString(Chat::Red,CANT_SEE_TARGET);
return false;
}
// check to see if target is a caster mob before performing a mana tap
if(spell_target && IsManaTapSpell(spell_id)) {
if(spell_target->GetCasterClass() == 'N') {
MessageString(Chat::Red, TARGET_NO_MANA);
return false;
}
}
//range check our target, if we have one and it is not us
float range = spells[spell_id].range + GetRangeDistTargetSizeMod(spell_target);
if(IsClient() && CastToClient()->TGB() && IsTGBCompatibleSpell(spell_id) && IsGroupSpell(spell_id))
range = spells[spell_id].aoerange;
range = GetActSpellRange(spell_id, range);
if(IsPlayerIllusionSpell(spell_id)
&& IsClient()
&& (HasProjectIllusion())){
range = 100;
}
if(spell_target != nullptr && spell_target != this) {
//casting a spell on somebody but ourself, make sure they are in range
float dist2 = DistanceSquared(m_Position, spell_target->GetPosition());
float range2 = range * range;
float min_range2 = spells[spell_id].min_range * spells[spell_id].min_range;
if(dist2 > range2) {
//target is out of range.
LogSpells("Spell [{}]: Spell target is out of range (squared: [{}] > [{}])", spell_id, dist2, range2);
MessageString(Chat::Red, TARGET_OUT_OF_RANGE);
return(false);
}
else if (dist2 < min_range2){
//target is too close range.
LogSpells("Spell [{}]: Spell target is too close (squared: [{}] < [{}])", spell_id, dist2, min_range2);
MessageString(Chat::Red, TARGET_TOO_CLOSE);
return(false);
}
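		// some spells scale their potency with distance to the target; record the distance for the effect calculation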
spell_target->CalcSpellPowerDistanceMod(spell_id, dist2);
}
//AE Duration spells were ignoring distance check from item clickies
if(ae_center != nullptr && ae_center != this) {
//casting a spell on somebody but ourself, make sure they are in range
float dist2 = DistanceSquared(m_Position, ae_center->GetPosition());
float range2 = range * range;
float min_range2 = spells[spell_id].min_range * spells[spell_id].min_range;
if(dist2 > range2) {
//target is out of range.
LogSpells("Spell [{}]: Spell target is out of range (squared: [{}] > [{}])", spell_id, dist2, range2);
MessageString(Chat::Red, TARGET_OUT_OF_RANGE);
return(false);
}
else if (dist2 < min_range2){
//target is too close range.
LogSpells("Spell [{}]: Spell target is too close (squared: [{}] < [{}])", spell_id, dist2, min_range2);
MessageString(Chat::Red, TARGET_TOO_CLOSE);
return(false);
}
ae_center->CalcSpellPowerDistanceMod(spell_id, dist2);
}
//
// Switch #2 - execute the spell
//
switch(CastAction)
{
default:
case CastActUnknown:
case SingleTarget:
{
#ifdef BOTS
if(IsBot()) {
bool StopLogic = false;
if(!this->CastToBot()->DoFinishedSpellSingleTarget(spell_id, spell_target, slot, StopLogic))
return false;
if(StopLogic)
break;
}
#endif //BOTS
if(spell_target == nullptr) {
LogSpells("Spell [{}]: Targeted spell, but we have no target", spell_id);
return(false);
}
if (isproc) {
SpellOnTarget(spell_id, spell_target, 0, true, resist_adjust, true, level_override);
} else {
if (spells[spell_id].targettype == ST_TargetOptional){
if (!TrySpellProjectile(spell_target, spell_id))
return false;
}
else if(!SpellOnTarget(spell_id, spell_target, 0, true, resist_adjust, false, level_override)) {
if(IsBuffSpell(spell_id) && IsBeneficialSpell(spell_id)) {
// Prevent mana usage/timers being set for beneficial buffs
if(casting_spell_aa_id)
InterruptSpell();
return false;
}
}
}
if(IsPlayerIllusionSpell(spell_id)
&& IsClient()
&& (HasProjectIllusion())){
LogAA("Effect Project Illusion for [{}] on spell id: [{}] was ON", GetName(), spell_id);
SetProjectIllusion(false);
}
else{
LogAA("Effect Project Illusion for [{}] on spell id: [{}] was OFF", GetName(), spell_id);
}
break;
}
case AECaster:
case AETarget:
{
#ifdef BOTS
if(IsBot()) {
bool StopLogic = false;
if(!this->CastToBot()->DoFinishedSpellAETarget(spell_id, spell_target, slot, StopLogic))
return false;
if(StopLogic)
break;
}
#endif //BOTS
// we can't cast an AE spell without something to center it on
assert(ae_center != nullptr);
if(ae_center->IsBeacon()) {
// special ae duration spell
ae_center->CastToBeacon()->AELocationSpell(this, spell_id, resist_adjust);
} else {
// unsure if we actually need this? Need to find some spell examples
if(ae_center && ae_center == this && IsBeneficialSpell(spell_id))
SpellOnTarget(spell_id, this);
// NPCs should never be affected by an AE they cast. PB AEs shouldn't affect caster either
// I don't think any other cases that get here matter
bool affect_caster = !IsNPC() && spells[spell_id].targettype != ST_AECaster;
if (spells[spell_id].targettype == ST_AETargetHateList)
hate_list.SpellCast(this, spell_id, spells[spell_id].aoerange, ae_center);
else
entity_list.AESpell(this, ae_center, spell_id, affect_caster, resist_adjust);
}
break;
}
case GroupSpell:
{
#ifdef BOTS
if(IsBot()) {
bool StopLogic = false;
if(!this->CastToBot()->DoFinishedSpellGroupTarget(spell_id, spell_target, slot, StopLogic))
return false;
if(StopLogic)
break;
}
#endif //BOTS
// We hold off turning MBG off so we can still use it to calc the mana cost
if(spells[spell_id].can_mgb && HasMGB())
{
SpellOnTarget(spell_id, this);
entity_list.MassGroupBuff(this, this, spell_id, true);
}
else
{
// at this point spell_target is a member of the other group, or the
// caster if they're not using TGB
// NOTE: this will always hit the caster, plus the target's group so
// it can affect up to 7 people if the targeted group is not our own
// Allow pets who cast group spells to affect the group.
if (spell_target->IsPetOwnerClient() && IsPetOwnerClient()){
Mob* owner = spell_target->GetOwner();
if (owner)
spell_target = owner;
}
if(spell_target->IsGrouped())
{
Group *target_group = entity_list.GetGroupByMob(spell_target);
if(target_group)
{
target_group->CastGroupSpell(this, spell_id);
}
}
else if(spell_target->IsRaidGrouped() && spell_target->IsClient())
{
Raid *target_raid = entity_list.GetRaidByClient(spell_target->CastToClient());
uint32 gid = 0xFFFFFFFF;
if(target_raid){
gid = target_raid->GetGroup(spell_target->GetName());
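					// raids hold at most 12 groups; gid >= 12 means the member is in the raid but not in a group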
if(gid < 12)
target_raid->CastGroupSpell(this, spell_id, gid);
else
SpellOnTarget(spell_id, spell_target);
}
}
else
{
// if target is grouped, CastGroupSpell will cast it on the caster
// too, but if not then we have to do that here.
if(spell_target != this){
SpellOnTarget(spell_id, this);
#ifdef GROUP_BUFF_PETS
//pet too
if (spells[spell_id].targettype != ST_GroupNoPets && GetPet() && HasPetAffinity() && !GetPet()->IsCharmed())
SpellOnTarget(spell_id, GetPet());
#endif
}
SpellOnTarget(spell_id, spell_target);
#ifdef GROUP_BUFF_PETS
//pet too
if (spells[spell_id].targettype != ST_GroupNoPets && spell_target->GetPet() && spell_target->HasPetAffinity() && !spell_target->GetPet()->IsCharmed())
SpellOnTarget(spell_id, spell_target->GetPet());
#endif
}
}
break;
}
case CAHateList:
{
if(!IsClient())
{
hate_list.SpellCast(this, spell_id, spells[spell_id].range > spells[spell_id].aoerange ? spells[spell_id].range : spells[spell_id].aoerange);
}
break;
}
case DirectionalAE:
{
ConeDirectional(spell_id, resist_adjust);
break;
}
case Beam:
{
BeamDirectional(spell_id, resist_adjust);
break;
}
case TargetRing:
{
entity_list.AESpell(this, nullptr, spell_id, false, resist_adjust);
break;
}
}
// Set and send the nimbus effect if this spell has one
int NimbusEffect = GetNimbusEffect(spell_id);
if(NimbusEffect) {
if(!IsNimbusEffectActive(NimbusEffect)) {
SendSpellEffect(NimbusEffect, 500, 0, 1, 3000, true);
}
}
bool mgb = HasMGB() && spells[spell_id].can_mgb;
// if this was a spell slot or an ability use up the mana for it
if(slot != CastingSlot::Item && slot != CastingSlot::PotionBelt && mana_used > 0)
{
mana_used = GetActSpellCost(spell_id, mana_used);
if (mgb) {
mana_used *= 2;
}
		// clamp if we somehow got focused above our current mana
if (GetMana() < mana_used)
mana_used = GetMana();
LogSpells("Spell [{}]: consuming [{}] mana", spell_id, mana_used);
if (!DoHPToManaCovert(mana_used)) {
SetMana(GetMana() - mana_used);
TryTriggerOnCastRequirement();
}
}
	// one may want to check if this is a disc or not, but we actually don't; there is non-disc stuff that has an end cost
	// let's not consume end for custom items that have disc procs.
	// One might also want to filter out USE_ITEM_SPELL_SLOT, but it and DISCIPLINE_SPELL_SLOT are both #defined to the same thing ...
if (spells[spell_id].EndurCost && !isproc) {
auto end_cost = spells[spell_id].EndurCost;
if (mgb)
end_cost *= 2;
SetEndurance(GetEndurance() - EQ::ClampUpper(end_cost, GetEndurance()));
TryTriggerOnCastRequirement();
}
if (mgb)
SetMGB(false);
//set our reuse timer on long ass reuse_time spells...
if(IsClient() && !isproc)
{
if(casting_spell_aa_id) {
AA::Rank *rank = zone->GetAlternateAdvancementRank(casting_spell_aa_id);
if(rank && rank->base_ability) {
ExpendAlternateAdvancementCharge(rank->base_ability->id);
}
}
else if(spell_id == casting_spell_id && casting_spell_timer != 0xFFFFFFFF)
{
//aa new todo: aa expendable charges here
CastToClient()->GetPTimers().Start(casting_spell_timer, casting_spell_timer_duration);
LogSpells("Spell [{}]: Setting custom reuse timer [{}] to [{}]", spell_id, casting_spell_timer, casting_spell_timer_duration);
}
else if(spells[spell_id].recast_time > 1000 && !spells[spell_id].IsDisciplineBuff) {
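			// recast_time is stored in milliseconds; persistent timers run in seconds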
int recast = spells[spell_id].recast_time/1000;
if (spell_id == SPELL_LAY_ON_HANDS) //lay on hands
{
recast -= GetAA(aaFervrentBlessing) * 420;
}
else if (spell_id == SPELL_HARM_TOUCH || spell_id == SPELL_HARM_TOUCH2) //harm touch
{
recast -= GetAA(aaTouchoftheWicked) * 420;
}
int reduction = CastToClient()->GetFocusEffect(focusReduceRecastTime, spell_id);//Client only
if(reduction)
recast -= reduction;
LogSpells("Spell [{}]: Setting long reuse timer to [{}] s (orig [{}])", spell_id, recast, spells[spell_id].recast_time);
CastToClient()->GetPTimers().Start(pTimerSpellStart + spell_id, recast);
}
}
if(IsClient() && (slot == CastingSlot::Item || slot == CastingSlot::PotionBelt))
{
EQ::ItemInstance *itm = CastToClient()->GetInv().GetItem(inventory_slot);
if(itm && itm->GetItem()->RecastDelay > 0){
auto recast_type = itm->GetItem()->RecastType;
CastToClient()->GetPTimers().Start((pTimerItemStart + recast_type), itm->GetItem()->RecastDelay);
if (recast_type != -1) {
database.UpdateItemRecastTimestamps(
CastToClient()->CharacterID(),
recast_type,
CastToClient()->GetPTimers().Get(pTimerItemStart + recast_type)->GetReadyTimestamp()
);
}
auto outapp = new EQApplicationPacket(OP_ItemRecastDelay, sizeof(ItemRecastDelay_Struct));
ItemRecastDelay_Struct *ird = (ItemRecastDelay_Struct *)outapp->pBuffer;
ird->recast_delay = itm->GetItem()->RecastDelay;
ird->recast_type = recast_type;
CastToClient()->QueuePacket(outapp);
safe_delete(outapp);
}
}
if(IsNPC())
CastToNPC()->AI_Event_SpellCastFinished(true, static_cast<uint16>(slot));
return true;
}
/*
* handle bard song pulses...
*
* we make several assumptions that SpellFinished does not:
* - there are no AEDuration (beacon) bard songs
* - there are no recourse spells on bard songs
* - there is no long recast delay on bard songs
*
* return false to stop the song
*/
bool Mob::ApplyNextBardPulse(uint16 spell_id, Mob *spell_target, CastingSlot slot) {
if(slot == CastingSlot::Item) {
//bard songs should never come from items...
LogSpells("Bard Song Pulse [{}]: Supposidly cast from an item. Killing song", spell_id);
return(false);
}
//determine the type of spell target we have
Mob *ae_center = nullptr;
CastAction_type CastAction;
if(!DetermineSpellTargets(spell_id, spell_target, ae_center, CastAction, slot)) {
LogSpells("Bard Song Pulse [{}]: was unable to determine target. Stopping", spell_id);
return(false);
}
if(ae_center != nullptr && ae_center->IsBeacon()) {
LogSpells("Bard Song Pulse [{}]: Unsupported Beacon NPC AE spell", spell_id);
return(false);
}
//use mana, if this spell has a mana cost
int mana_used = spells[spell_id].mana;
if(mana_used > 0) {
if(mana_used > GetMana()) {
//ran out of mana... this calls StopSong() for us
LogSpells("Ran out of mana while singing song [{}]", spell_id);
return(false);
}
LogSpells("Bard Song Pulse [{}]: consuming [{}] mana (have [{}])", spell_id, mana_used, GetMana());
SetMana(GetMana() - mana_used);
}
// check line of sight to target if it's a detrimental spell
if(spell_target && IsDetrimentalSpell(spell_id) && !CheckLosFN(spell_target))
{
LogSpells("Bard Song Pulse [{}]: cannot see target [{}]", spell_target->GetName());
MessageString(Chat::Red, CANT_SEE_TARGET);
return(false);
}
//range check our target, if we have one and it is not us
	float range = GetActSpellRange(spell_id, spells[spell_id].range, true);
if(spell_target != nullptr && spell_target != this) {
//casting a spell on somebody but ourself, make sure they are in range
float dist2 = DistanceSquared(m_Position, spell_target->GetPosition());
float range2 = range * range;
if(dist2 > range2) {
//target is out of range.
LogSpells("Bard Song Pulse [{}]: Spell target is out of range (squared: [{}] > [{}])", spell_id, dist2, range2);
MessageString(Chat::Red, TARGET_OUT_OF_RANGE);
return(false);
}
}
//
// Switch #2 - execute the spell
//
switch(CastAction)
{
default:
case CastActUnknown:
case SingleTarget:
{
if(spell_target == nullptr) {
LogSpells("Bard Song Pulse [{}]: Targeted spell, but we have no target", spell_id);
return(false);
}
LogSpells("Bard Song Pulse: Targeted. spell [{}], target [{}]", spell_id, spell_target->GetName());
spell_target->BardPulse(spell_id, this);
break;
}
case AECaster:
{
if(IsBeneficialSpell(spell_id))
SpellOnTarget(spell_id, this);
bool affect_caster = !IsNPC(); //NPC AE spells do not affect the NPC caster
entity_list.AEBardPulse(this, this, spell_id, affect_caster);
break;
}
case AETarget:
{
// we can't cast an AE spell without something to center it on
if(ae_center == nullptr) {
LogSpells("Bard Song Pulse [{}]: AE Targeted spell, but we have no target", spell_id);
return(false);
}
// regular PB AE or targeted AE spell - spell_target is null if PB
if(spell_target) { // this must be an AETarget spell
// affect the target too
spell_target->BardPulse(spell_id, this);
LogSpells("Bard Song Pulse: spell [{}], AE target [{}]", spell_id, spell_target->GetName());
} else {
LogSpells("Bard Song Pulse: spell [{}], AE with no target", spell_id);
}
bool affect_caster = !IsNPC(); //NPC AE spells do not affect the NPC caster
entity_list.AEBardPulse(this, ae_center, spell_id, affect_caster);
break;
}
case GroupSpell:
{
if(spell_target->IsGrouped()) {
LogSpells("Bard Song Pulse: spell [{}], Group targeting group of [{}]", spell_id, spell_target->GetName());
Group *target_group = entity_list.GetGroupByMob(spell_target);
if(target_group)
target_group->GroupBardPulse(this, spell_id);
}
else if(spell_target->IsRaidGrouped() && spell_target->IsClient()) {
LogSpells("Bard Song Pulse: spell [{}], Raid group targeting raid group of [{}]", spell_id, spell_target->GetName());
Raid *r = entity_list.GetRaidByClient(spell_target->CastToClient());
if(r){
uint32 gid = r->GetGroup(spell_target->GetName());
if(gid < 12){
r->GroupBardPulse(this, spell_id, gid);
}
else{
BardPulse(spell_id, this);
#ifdef GROUP_BUFF_PETS
if (GetPet() && HasPetAffinity() && !GetPet()->IsCharmed())
GetPet()->BardPulse(spell_id, this);
#endif
}
}
}
else {
LogSpells("Bard Song Pulse: spell [{}], Group target without group. Affecting caster", spell_id);
BardPulse(spell_id, this);
#ifdef GROUP_BUFF_PETS
if (GetPet() && HasPetAffinity() && !GetPet()->IsCharmed())
GetPet()->BardPulse(spell_id, this);
#endif
}
break;
}
}
if(IsClient())
CastToClient()->CheckSongSkillIncrease(spell_id);
return(true);
}
void Mob::BardPulse(uint16 spell_id, Mob *caster) {
// so for Solon's Song of the Sirens (725) if we're repulsing, we need to skip
// other charms have mana and don't repulse
// This is probably not the ideal place for this, but it will work
if (IsCharmed() && GetOwner() == caster && IsEffectInSpell(spell_id, SE_Charm)) {
return;
}
int buffs_i;
int buff_count = GetMaxTotalSlots();
for (buffs_i = 0; buffs_i < buff_count; buffs_i++) {
if(buffs[buffs_i].spellid != spell_id)
continue;
if(buffs[buffs_i].casterid != caster->GetID()) {
LogSpells("Bard Pulse for [{}]: found buff from caster [{}] and we are pulsing for [{}] are there two bards playing the same song???", spell_id, buffs[buffs_i].casterid, caster->GetID());
return;
}
//extend the spell if it will expire before the next pulse
if(buffs[buffs_i].ticsremaining <= 3) {
buffs[buffs_i].ticsremaining += 3;
LogSpells("Bard Song Pulse [{}]: extending duration in slot [{}] to [{}] tics", spell_id, buffs_i, buffs[buffs_i].ticsremaining);
}
//should we send this buff update to the client... seems like it would
//be a lot of traffic for no reason...
//this may be the wrong packet...
if(IsClient()) {
auto packet = new EQApplicationPacket(OP_Action, sizeof(Action_Struct));
Action_Struct* action = (Action_Struct*) packet->pBuffer;
action->source = caster->GetID();
action->target = GetID();
action->spell = spell_id;
action->force = spells[spell_id].pushback;
action->hit_heading = GetHeading();
action->hit_pitch = spells[spell_id].pushup;
action->instrument_mod = caster->GetInstrumentMod(spell_id);
action->effect_flag = 0;
action->spell_level = action->level = buffs[buffs_i].casterlevel;
action->type = DamageTypeSpell;
entity_list.QueueCloseClients(this, packet, false, RuleI(Range, SongMessages), 0, true, IsClient() ? FilterPCSpells : FilterNPCSpells);
action->effect_flag = 4;
if (spells[spell_id].pushback != 0.0f || spells[spell_id].pushup != 0.0f)
{
if (IsClient())
{
if (!IsBuffSpell(spell_id))
{
CastToClient()->cheat_manager.SetExemptStatus(KnockBack, true);
}
}
}
if (IsClient() && IsEffectInSpell(spell_id, SE_ShadowStep))
{
CastToClient()->cheat_manager.SetExemptStatus(ShadowStep, true);
}
if(!IsEffectInSpell(spell_id, SE_BindAffinity))
{
CastToClient()->QueuePacket(packet);
}
auto message_packet = new EQApplicationPacket(OP_Damage, sizeof(CombatDamage_Struct));
CombatDamage_Struct *cd = (CombatDamage_Struct *)message_packet->pBuffer;
cd->target = action->target;
cd->source = action->source;
cd->type = DamageTypeSpell;
cd->spellid = action->spell;
cd->force = action->force;
cd->hit_heading = action->hit_heading;
cd->hit_pitch = action->hit_pitch;
cd->damage = 0;
if(!IsEffectInSpell(spell_id, SE_BindAffinity))
{
entity_list.QueueCloseClients(this, message_packet, false, RuleI(Range, SongMessages), 0, true, IsClient() ? FilterPCSpells : FilterNPCSpells);
}
safe_delete(message_packet);
safe_delete(packet);
}
//we are done...
return;
}
LogSpells("Bard Song Pulse [{}]: Buff not found, reapplying spell", spell_id);
//this spell is not affecting this mob, apply it.
caster->SpellOnTarget(spell_id, this);
}
///////////////////////////////////////////////////////////////////////////////
// buff related functions
// returns how many _ticks_ the buff will last.
// a tick is 6 seconds
// this is the place to figure out random duration buffs like fear and charm.
// both the caster and target mobs are passed in, so different behavior can
// even be created depending on the types of mobs involved
//
// right now this is just an outline, working on this..
int Mob::CalcBuffDuration(Mob *caster, Mob *target, uint16 spell_id, int32 caster_level_override)
{
int formula, duration;
if(!IsValidSpell(spell_id) || (!caster && !target))
return 0;
// if we have at least one, we can make do, we'll just pretend they're the same
if(!caster)
caster = target;
if(!target)
target = caster;
// PVP duration
if (IsDetrimentalSpell(spell_id) && target->IsClient() && caster->IsClient()) {
formula = spells[spell_id].pvp_duration;
duration = spells[spell_id].pvp_duration_cap;
} else {
formula = spells[spell_id].buffdurationformula;
duration = spells[spell_id].buffduration;
}
int castlevel = caster->GetCasterLevel(spell_id);
if(caster_level_override > 0)
castlevel = caster_level_override;
int res = CalcBuffDuration_formula(castlevel, formula, duration);
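	// Illusion Persistence extends self-cast illusions to ~16 hours, except Minor Illusion (287) and Illusion: Tree (601)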
if (caster == target && (target->aabonuses.IllusionPersistence || target->spellbonuses.IllusionPersistence ||
target->itembonuses.IllusionPersistence) &&
spell_id != 287 && spell_id != 601 && IsEffectInSpell(spell_id, SE_Illusion))
res = 10000; // ~16h override
res = mod_buff_duration(res, caster, target, spell_id);
LogSpells("Spell [{}]: Casting level [{}], formula [{}], base_duration [{}]: result [{}]",
spell_id, castlevel, formula, duration, res);
return res;
}
// the generic formula calculations
int CalcBuffDuration_formula(int level, int formula, int duration)
{
int temp;
switch (formula) {
case 1:
temp = level > 3 ? level / 2 : 1;
break;
case 2:
temp = level > 3 ? level / 2 + 5 : 6;
break;
case 3:
temp = 30 * level;
break;
case 4: // only used by 'LowerElement'
temp = 50;
break;
case 5:
temp = 2;
break;
case 6:
temp = level / 2 + 2;
break;
case 7:
temp = level;
break;
case 8:
temp = level + 10;
break;
case 9:
temp = 2 * level + 10;
break;
case 10:
temp = 3 * level + 10;
break;
case 11:
temp = 30 * (level + 3);
break;
case 12:
temp = level > 7 ? level / 4 : 1;
break;
case 13:
temp = 4 * level + 10;
break;
case 14:
temp = 5 * (level + 2);
break;
case 15:
temp = 10 * (level + 10);
break;
case 50: // Permanent. Cancelled by casting/combat for perm invis, non-lev zones for lev, curing poison/curse
// counters, etc.
return -1;
case 51: // Permanent. Cancelled when out of range of aura.
return -4;
default:
// the client function has another bool parameter that if true returns -2 -- unsure
if (formula < 200)
return 0;
temp = formula;
break;
}
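	// a non-zero base duration acts as a cap on the formula result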
if (duration && duration < temp)
temp = duration;
return temp;
}
// helper function for AddBuff to determine stacking
// spellid1 is the spell already worn, spellid2 is the one trying to be cast
// returns:
// 0 if not the same type, no action needs to be taken
// 1 if spellid1 should be removed (overwrite)
// -1 if they can't stack and spellid2 should be stopped
//currently, a spell will not land if it would overwrite a better spell on any effect
//if all effects are better or the same, we overwrite, else we do nothing
int Mob::CheckStackConflict(uint16 spellid1, int caster_level1, uint16 spellid2, int caster_level2, Mob* caster1, Mob* caster2, int buffslot)
{
const SPDat_Spell_Struct &sp1 = spells[spellid1];
const SPDat_Spell_Struct &sp2 = spells[spellid2];
int i, effect1, effect2, sp1_value, sp2_value;
int blocked_effect, blocked_below_value, blocked_slot;
int overwrite_effect, overwrite_below_value, overwrite_slot;
LogSpells("Check Stacking on old [{}] ([{}]) @ lvl [{}] (by [{}]) vs. new [{}] ([{}]) @ lvl [{}] (by [{}])", sp1.name, spellid1, caster_level1, (caster1==nullptr)?"Nobody":caster1->GetName(), sp2.name, spellid2, caster_level2, (caster2==nullptr)?"Nobody":caster2->GetName());
if (spellbonuses.CompleteHealBuffBlocker && IsEffectInSpell(spellid2, SE_CompleteHeal)) {
Message(0, "You must wait before you can be affected by this spell again.");
return -1;
}
if (spellid1 == spellid2 ) {
if (!IsStackableDot(spellid1) && !IsEffectInSpell(spellid1, SE_ManaBurn)) { // mana burn spells we need to use the stacking command blocks live actually checks those first, we should probably rework to that too
if (caster_level1 > caster_level2) { // cur buff higher level than new
if (IsEffectInSpell(spellid1, SE_ImprovedTaunt)) {
LogSpells("SE_ImprovedTaunt level exception, overwriting");
return 1;
} else {
LogSpells("Spells the same but existing is higher level, stopping");
return -1;
}
} else {
LogSpells("Spells the same but newer is higher or equal level, overwriting");
return 1;
}
} else if (spellid1 == 2751) {
LogSpells("Blocking spell because manaburn does not stack with itself");
return -1;
}
}
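	// custom stacking hook: any return value below 2 is treated as a final verdict (-1 block, 0 no action, 1 overwrite)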
int modval = mod_spell_stack(spellid1, caster_level1, caster1, spellid2, caster_level2, caster2);
if(modval < 2) { return(modval); }
/*
One of these is a bard song and one isn't and they're both beneficial so they should stack.
*/
if(IsBardSong(spellid1) != IsBardSong(spellid2))
{
if(!IsDetrimentalSpell(spellid1) && !IsDetrimentalSpell(spellid2))
{
LogSpells("[{}] and [{}] are beneficial, and one is a bard song, no action needs to be taken", sp1.name, sp2.name);
return (0);
}
}
bool effect_match = true; // Figure out if we're identical in effects on all slots.
if (spellid1 != spellid2) {
for (i = 0; i < EFFECT_COUNT; i++) {
// we don't want this optimization for mana burns
if (sp1.effectid[i] != sp2.effectid[i] || sp1.effectid[i] == SE_ManaBurn) {
effect_match = false;
break;
}
}
} else if (IsEffectInSpell(spellid1, SE_ManaBurn)) {
LogSpells("We have a Mana Burn spell that is the same, they won't stack");
return -1;
}
// check for special stacking overwrite in spell2 against effects in spell1
// If all of the effects match they are the same line and shouldn't care for these checks
if (!effect_match) {
for(i = 0; i < EFFECT_COUNT; i++)
{
effect1 = sp1.effectid[i];
effect2 = sp2.effectid[i];
if (spellbonuses.Screech == 1) {
if (effect2 == SE_Screech && sp2.base[i] == -1) {
MessageString(Chat::SpellFailure, SCREECH_BUFF_BLOCK, sp2.name);
return -1;
}
}
/*
Buff stacking prevention spell effects (446 - 449) works as follows... If B prevent A, if C prevent B, if D prevent C.
If checking same type ie A vs A, which ever effect base value is higher will take hold.
Special check is added to make sure the buffs stack properly when applied from fade on duration effect, since the buff
is not fully removed at the time of the trigger
*/
if (spellbonuses.AStacker[SBIndex::BUFFSTACKER_EXISTS]) {
				if ((effect2 == SE_AStacker) && (sp2.base[i] <= spellbonuses.AStacker[SBIndex::BUFFSTACKER_VALUE]))
return -1;
}
if (spellbonuses.BStacker[SBIndex::BUFFSTACKER_EXISTS]) {
				if ((effect2 == SE_BStacker) && (sp2.base[i] <= spellbonuses.BStacker[SBIndex::BUFFSTACKER_VALUE]))
return -1;
if ((effect2 == SE_AStacker) && (!IsCastonFadeDurationSpell(spellid1) && buffs[buffslot].ticsremaining != 1 && IsEffectInSpell(spellid1, SE_BStacker)))
return -1;
}
if (spellbonuses.CStacker[SBIndex::BUFFSTACKER_EXISTS]) {
				if ((effect2 == SE_CStacker) && (sp2.base[i] <= spellbonuses.CStacker[SBIndex::BUFFSTACKER_VALUE]))
return -1;
if ((effect2 == SE_BStacker) && (!IsCastonFadeDurationSpell(spellid1) && buffs[buffslot].ticsremaining != 1 && IsEffectInSpell(spellid1, SE_CStacker)))
return -1;
}
if (spellbonuses.DStacker[SBIndex::BUFFSTACKER_EXISTS]) {
				if ((effect2 == SE_DStacker) && (sp2.base[i] <= spellbonuses.DStacker[SBIndex::BUFFSTACKER_VALUE]))
return -1;
if ((effect2 == SE_CStacker) && (!IsCastonFadeDurationSpell(spellid1) && buffs[buffslot].ticsremaining != 1 && IsEffectInSpell(spellid1, SE_DStacker)))
return -1;
}
if(effect2 == SE_StackingCommand_Overwrite)
{
overwrite_effect = sp2.base[i];
overwrite_slot = sp2.formula[i] - 201; //they use base 1 for slots, we use base 0
overwrite_below_value = sp2.max[i];
if(sp1.effectid[overwrite_slot] == overwrite_effect)
{
sp1_value = CalcSpellEffectValue(spellid1, overwrite_slot, caster_level1);
LogSpells("[{}] ([{}]) overwrites existing spell if effect [{}] on slot [{}] is below [{}]. Old spell has value [{}] on that slot/effect. [{}]",
sp2.name, spellid2, overwrite_effect, overwrite_slot, overwrite_below_value, sp1_value, (sp1_value < overwrite_below_value)?"Overwriting":"Not overwriting");
if(sp1_value < overwrite_below_value)
{
LogSpells("Overwrite spell because sp1_value < overwrite_below_value");
return 1; // overwrite spell if its value is less
}
} else {
LogSpells("[{}] ([{}]) overwrites existing spell if effect [{}] on slot [{}] is below [{}], but we do not have that effect on that slot. Ignored",
sp2.name, spellid2, overwrite_effect, overwrite_slot, overwrite_below_value);
}
} else if (effect1 == SE_StackingCommand_Block)
{
blocked_effect = sp1.base[i];
blocked_slot = sp1.formula[i] - 201;
blocked_below_value = sp1.max[i];
if (sp2.effectid[blocked_slot] == blocked_effect)
{
sp2_value = CalcSpellEffectValue(spellid2, blocked_slot, caster_level2);
LogSpells("[{}] ([{}]) blocks effect [{}] on slot [{}] below [{}]. New spell has value [{}] on that slot/effect. [{}]",
sp1.name, spellid1, blocked_effect, blocked_slot, blocked_below_value, sp2_value, (sp2_value < blocked_below_value)?"Blocked":"Not blocked");
if (sp2_value < blocked_below_value)
{
LogSpells("Blocking spell because sp2_Value < blocked_below_value");
return -1; //blocked
}
} else {
LogSpells("[{}] ([{}]) blocks effect [{}] on slot [{}] below [{}], but we do not have that effect on that slot. Ignored",
sp1.name, spellid1, blocked_effect, blocked_slot, blocked_below_value);
}
}
}
} else {
LogSpells("[{}] ([{}]) and [{}] ([{}]) appear to be in the same line, skipping Stacking Overwrite/Blocking checks",
sp1.name, spellid1, sp2.name, spellid2);
}
bool sp1_detrimental = IsDetrimentalSpell(spellid1);
bool sp2_detrimental = IsDetrimentalSpell(spellid2);
bool sp_det_mismatch;
if(sp1_detrimental == sp2_detrimental)
sp_det_mismatch = false;
else
sp_det_mismatch = true;
// now compare matching effects
// arbitration takes place if 2 spells have the same effect at the same
// effect slot, otherwise they're stackable, even if it's the same effect
bool will_overwrite = false;
bool values_equal = true;
for(i = 0; i < EFFECT_COUNT; i++)
{
if(IsBlankSpellEffect(spellid1, i) || IsBlankSpellEffect(spellid2, i))
continue;
effect1 = sp1.effectid[i];
effect2 = sp2.effectid[i];
/*
Quick check, are the effects the same, if so then
keep going else ignore it for stacking purposes.
*/
if(effect1 != effect2)
continue;
if (IsBardOnlyStackEffect(effect1) && GetSpellLevel(spellid1, BARD) != 255 &&
GetSpellLevel(spellid2, BARD) != 255)
continue;
// big ol' list according to the client, wasn't that nice!
if (IsEffectIgnoredInStacking(effect1))
continue;
// negative AC affects are skipped. Ex. Sun's Corona and Glacier Breath should stack
// There may be more SPAs we need to add here ....
// The client does just check base rather than calculating the affect change value.
if ((effect1 == SE_ArmorClass || effect1 == SE_ACv2) && sp2.base[i] < 0)
continue;
/*
If target is a npc and caster1 and caster2 exist
If Caster1 isn't the same as Caster2 and the effect is a DoT then ignore it.
*/
if(IsNPC() && caster1 && caster2 && caster1 != caster2) {
if(effect1 == SE_CurrentHP && sp1_detrimental && sp2_detrimental) {
LogSpells("Both casters exist and are not the same, the effect is a detrimental dot, moving on");
continue;
}
}
if(effect1 == SE_CompleteHeal){ //SE_CompleteHeal never stacks or overwrites ever, always block.
LogSpells("Blocking spell because complete heal never stacks or overwries");
return (-1);
}
/*
If the spells aren't the same
and the effect is a dot we can go ahead and stack it
*/
if(effect1 == SE_CurrentHP && spellid1 != spellid2 && sp1_detrimental && sp2_detrimental) {
LogSpells("The spells are not the same and it is a detrimental dot, passing");
continue;
}
sp1_value = CalcSpellEffectValue(spellid1, i, caster_level1);
sp2_value = CalcSpellEffectValue(spellid2, i, caster_level2);
// some spells are hard to compare just on value. attack speed spells
// have a value that's a percentage for instance
if
(
effect1 == SE_AttackSpeed ||
effect1 == SE_AttackSpeed2
)
{
sp1_value -= 100;
sp2_value -= 100;
}
if(sp1_value < 0)
sp1_value = 0 - sp1_value;
if(sp2_value < 0)
sp2_value = 0 - sp2_value;
if(sp2_value < sp1_value) {
LogSpells("Spell [{}] (value [{}]) is not as good as [{}] (value [{}]). Rejecting [{}]",
sp2.name, sp2_value, sp1.name, sp1_value, sp2.name);
return -1; // can't stack
}
if (sp2_value != sp1_value)
values_equal = false;
		//we don't return here... a better value on this one effect doesn't mean they are
		//all better...
LogSpells("Spell [{}] (value [{}]) is not as good as [{}] (value [{}]). We will overwrite [{}] if there are no other conflicts",
sp1.name, sp1_value, sp2.name, sp2_value, sp1.name);
will_overwrite = true;
}
//if we get here, then none of the values on the new spell are "worse"
//so now we see if this new spell is any better, or if its not related at all
if(will_overwrite) {
if (values_equal && effect_match && !IsGroupSpell(spellid2) && IsGroupSpell(spellid1)) {
LogSpells("[{}] ([{}]) appears to be the single target version of [{}] ([{}]), rejecting",
sp2.name, spellid2, sp1.name, spellid1);
return -1;
}
LogSpells("Stacking code decided that [{}] should overwrite [{}]", sp2.name, sp1.name);
return(1);
}
LogSpells("Stacking code decided that [{}] is not affected by [{}]", sp2.name, sp1.name);
return 0;
}
// Check Spell Level Restrictions
// returns true if they meet the restrictions, false otherwise
// derived from http://samanna.net/eq.general/buffs.shtml
// spells 1-50: no restrictions
// 51-65: SpellLevel/2+15
// 66+ Group Spells 62, Single Target 61
bool Mob::CheckSpellLevelRestriction(uint16 spell_id)
{
return true;
}
bool Client::CheckSpellLevelRestriction(uint16 spell_id)
{
int SpellLevel = GetMinLevel(spell_id);
// Only check for beneficial buffs
if (IsBuffSpell(spell_id) && IsBeneficialSpell(spell_id)) {
if (SpellLevel > 65) {
if (IsGroupSpell(spell_id) && GetLevel() < 62)
return false;
else if (GetLevel() < 61)
return false;
} else if (SpellLevel > 50) { // 51-65
if (GetLevel() < (SpellLevel / 2 + 15))
return false;
}
}
return true;
}
uint32 Mob::GetFirstBuffSlot(bool disc, bool song)
{
return 0;
}
uint32 Mob::GetLastBuffSlot(bool disc, bool song)
{
return GetCurrentBuffSlots();
}
uint32 Client::GetFirstBuffSlot(bool disc, bool song)
{
if (song)
return GetMaxBuffSlots();
if (disc)
return GetMaxBuffSlots() + GetMaxSongSlots();
return 0;
}
uint32 Client::GetLastBuffSlot(bool disc, bool song)
{
if (song)
return GetMaxBuffSlots() + GetCurrentSongSlots();
if (disc)
return GetMaxBuffSlots() + GetMaxSongSlots() + GetCurrentDiscSlots();
return GetCurrentBuffSlots();
}
bool Mob::HasDiscBuff()
{
int slot = GetFirstBuffSlot(true, false);
return buffs[slot].spellid != SPELL_UNKNOWN;
}
// returns the slot the buff was added to, -1 if it wasn't added due to
// stacking problems, and -2 if this is not a buff
// if caster is null, the buff will be added with the caster level being
// the level of the mob
int Mob::AddBuff(Mob *caster, uint16 spell_id, int duration, int32 level_override)
{
int buffslot, ret, caster_level, emptyslot = -1;
bool will_overwrite = false;
std::vector<int> overwrite_slots;
if (level_override > 0)
caster_level = level_override;
else
caster_level = caster ? caster->GetCasterLevel(spell_id) : GetCasterLevel(spell_id);
if (duration == 0) {
duration = CalcBuffDuration(caster, this, spell_id);
if (caster && duration > 0) // negatives are perma buffs
duration = caster->GetActSpellDuration(spell_id, duration);
}
if (duration == 0) {
LogSpells("Buff [{}] failed to add because its duration came back as 0", spell_id);
return -2; // no duration? this isn't a buff
}
LogSpells("Trying to add buff [{}] cast by [{}] (cast level [{}]) with duration [{}]",
spell_id, caster?caster->GetName():"UNKNOWN", caster_level, duration);
// first we loop through everything checking that the spell
// can stack with everything. this is to avoid stripping the spells
// it would overwrite, and then hitting a buff we can't stack with.
// we also check if overwriting will occur. this is so after this loop
// we can determine if there will be room for this buff
int buff_count = GetMaxTotalSlots();
uint32 start_slot = GetFirstBuffSlot(IsDisciplineBuff(spell_id), spells[spell_id].short_buff_box);
uint32 end_slot = GetLastBuffSlot(IsDisciplineBuff(spell_id), spells[spell_id].short_buff_box);
for (buffslot = 0; buffslot < buff_count; buffslot++) {
const Buffs_Struct &curbuf = buffs[buffslot];
if (curbuf.spellid != SPELL_UNKNOWN) {
// there's a buff in this slot
ret = CheckStackConflict(curbuf.spellid, curbuf.casterlevel, spell_id,
caster_level, entity_list.GetMobID(curbuf.casterid), caster, buffslot);
if (ret == -1) { // stop the spell
LogSpells("Adding buff [{}] failed: stacking prevented by spell [{}] in slot [{}] with caster level [{}]",
spell_id, curbuf.spellid, buffslot, curbuf.casterlevel);
if (caster && caster->IsClient() && RuleB(Client, UseLiveBlockedMessage)) {
caster->Message(Chat::Red, "Your %s did not take hold on %s. (Blocked by %s.)", spells[spell_id].name, this->GetName(), spells[curbuf.spellid].name);
}
return -1;
}
if (ret == 1) { // set a flag to indicate that there will be overwriting
LogSpells("Adding buff [{}] will overwrite spell [{}] in slot [{}] with caster level [{}]",
spell_id, curbuf.spellid, buffslot, curbuf.casterlevel);
// If this is the first buff it would override, use its slot
if (!will_overwrite && !IsDisciplineBuff(spell_id))
emptyslot = buffslot;
will_overwrite = true;
overwrite_slots.push_back(buffslot);
}
} else {
if (emptyslot == -1) {
if (buffslot >= start_slot && buffslot < end_slot) {
emptyslot = buffslot;
}
}
}
}
// we didn't find an empty slot to put it in, and it's not overwriting
// anything so there must not be any room left.
if (emptyslot == -1 && !will_overwrite) {
if (IsDetrimentalSpell(spell_id)) {//Sucks to be you, bye bye one of your buffs
for (buffslot = 0; buffslot < buff_count; buffslot++) {
const Buffs_Struct &curbuf = buffs[buffslot];
if (IsBeneficialSpell(curbuf.spellid)) {
LogSpells("No slot for detrimental buff [{}], so we are overwriting a beneficial buff [{}] in slot [{}]",
spell_id, curbuf.spellid, buffslot);
BuffFadeBySlot(buffslot, false);
emptyslot = buffslot;
break;
}
}
if(emptyslot == -1) {
LogSpells("Unable to find a buff slot for detrimental buff [{}]", spell_id);
return -1;
}
} else {
LogSpells("Unable to find a buff slot for beneficial buff [{}]", spell_id);
return -1;
}
}
// at this point we know that this buff will stick, but we have
// to remove some other buffs already worn if will_overwrite is true
if (will_overwrite) {
std::vector<int>::iterator cur, end;
cur = overwrite_slots.begin();
end = overwrite_slots.end();
for (; cur != end; ++cur) {
// strip spell
BuffFadeBySlot(*cur, false);
// if we hadn't found a free slot before, or if this is earlier
// we use it
if (emptyslot == -1 || (*cur < emptyslot && !IsDisciplineBuff(spell_id)))
emptyslot = *cur;
}
}
// now add buff at emptyslot
assert(buffs[emptyslot].spellid == SPELL_UNKNOWN); // sanity check
buffs[emptyslot].spellid = spell_id;
buffs[emptyslot].casterlevel = caster_level;
if (caster && !caster->IsAura()) // maybe some other things we don't want to ...
strcpy(buffs[emptyslot].caster_name, caster->GetCleanName());
else
memset(buffs[emptyslot].caster_name, 0, 64);
buffs[emptyslot].casterid = caster ? caster->GetID() : 0;
buffs[emptyslot].ticsremaining = duration;
buffs[emptyslot].counters = CalculateCounters(spell_id);
buffs[emptyslot].numhits = spells[spell_id].numhits;
buffs[emptyslot].client = caster ? caster->IsClient() : 0;
buffs[emptyslot].persistant_buff = 0;
buffs[emptyslot].caston_x = 0;
buffs[emptyslot].caston_y = 0;
buffs[emptyslot].caston_z = 0;
buffs[emptyslot].dot_rune = 0;
buffs[emptyslot].ExtraDIChance = 0;
buffs[emptyslot].RootBreakChance = 0;
buffs[emptyslot].virus_spread_time = 0;
buffs[emptyslot].instrument_mod = caster ? caster->GetInstrumentMod(spell_id) : 10;
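	// push a duration update to the client when the duration was overridden, the buff tracks hit counts,
	// or the real duration exceeds what the client would predict from the base formula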
if (level_override > 0 || buffs[emptyslot].numhits > 0) {
buffs[emptyslot].UpdateClient = true;
} else {
if (buffs[emptyslot].ticsremaining > (1 + CalcBuffDuration_formula(caster_level, spells[spell_id].buffdurationformula, spells[spell_id].buffduration)))
buffs[emptyslot].UpdateClient = true;
}
LogSpells("Buff [{}] added to slot [{}] with caster level [{}]", spell_id, emptyslot, caster_level);
if (IsPet() && GetOwner() && GetOwner()->IsClient())
SendPetBuffsToClient();
if((IsClient() && !CastToClient()->GetPVP()) ||
(IsPet() && GetOwner() && GetOwner()->IsClient() && !GetOwner()->CastToClient()->GetPVP()) ||
#ifdef BOTS
(IsBot() && GetOwner() && GetOwner()->IsClient() && !GetOwner()->CastToClient()->GetPVP()) ||
#endif
(IsMerc() && GetOwner() && GetOwner()->IsClient() && !GetOwner()->CastToClient()->GetPVP()))
{
EQApplicationPacket *outapp = MakeBuffsPacket();
entity_list.QueueClientsByTarget(this, outapp, false, nullptr, true, false, EQ::versions::maskSoDAndLater);
if(IsClient() && GetTarget() == this)
CastToClient()->QueuePacket(outapp);
safe_delete(outapp);
}
if (IsNPC()) {
EQApplicationPacket *outapp = MakeBuffsPacket();
entity_list.QueueClientsByTarget(this, outapp, false, nullptr, true, false, EQ::versions::maskSoDAndLater, true);
safe_delete(outapp);
}
// recalculate bonuses since we stripped/added buffs
CalcBonuses();
return emptyslot;
}
// used by some MobAI stuff
// NOT USED BY SPELL CODE
// note that this should not be used for determining which slot to place a
// buff into
// returns -1 on stack failure, -2 if all slots full, the slot number if the buff should overwrite another buff, or a free buff slot
int Mob::CanBuffStack(uint16 spellid, uint8 caster_level, bool iFailIfOverwrite)
{
int i, ret, firstfree = -2;
LogAI("Checking if buff [{}] cast at level [{}] can stack on me.[{}]", spellid, caster_level, iFailIfOverwrite?" failing if we would overwrite something":"");
int buff_count = GetMaxTotalSlots();
for (i=0; i < buff_count; i++)
{
const Buffs_Struct &curbuf = buffs[i];
// no buff in this slot
if (curbuf.spellid == SPELL_UNKNOWN)
{
// if we haven't found a free slot, this is the first one so save it
if(firstfree == -2)
firstfree = i;
continue;
}
if(curbuf.spellid == spellid)
			return(-1); //do not recast a buff we already have on, we recast fast enough that we don't need to refresh our buffs
// there's a buff in this slot
ret = CheckStackConflict(curbuf.spellid, curbuf.casterlevel, spellid, caster_level, nullptr, nullptr, i);
if(ret == 1) {
// should overwrite current slot
if(iFailIfOverwrite) {
LogAI("Buff [{}] would overwrite [{}] in slot [{}], reporting stack failure", spellid, curbuf.spellid, i);
return(-1);
}
if(firstfree == -2)
firstfree = i;
}
if(ret == -1) {
LogAI("Buff [{}] would conflict with [{}] in slot [{}], reporting stack failure", spellid, curbuf.spellid, i);
return -1; // stop the spell, can't stack it
}
}
LogAI("Reporting that buff [{}] could successfully be placed into slot [{}]", spellid, firstfree);
return firstfree;
}
///////////////////////////////////////////////////////////////////////////////
// spell effect related functions
//
// this is actually applying a spell cast from 'this' on 'spelltar'
// it performs pvp checking and applies resists, etc then it
// passes it to SpellEffect which causes effects to the target
//
// this is called by these functions:
// Mob::SpellFinished
// Entity::AESpell (called by Mob::SpellFinished)
// Group::CastGroupSpell (called by Mob::SpellFinished)
//
// also note you can't interrupt the spell here. at this point it's going
// and if you don't want effects just return false. interrupting here will
// break stuff
//
bool Mob::SpellOnTarget(uint16 spell_id, Mob *spelltar, int reflect_effectiveness, bool use_resist_adjust, int16 resist_adjust,
bool isproc, int level_override, int32 duration_override)
{
bool is_damage_or_lifetap_spell = IsDamageSpell(spell_id) || IsLifetapSpell(spell_id);
// well we can't cast a spell on target without a target
if(!spelltar)
{
LogSpells("Unable to apply spell [{}] without a target", spell_id);
Message(Chat::Red, "SOT: You must have a target for this spell.");
return false;
}
if(spelltar->IsClient() && spelltar->CastToClient()->IsHoveringForRespawn())
return false;
if(IsDetrimentalSpell(spell_id) && !IsAttackAllowed(spelltar, true) && !IsResurrectionEffects(spell_id)) {
if(!IsClient() || !CastToClient()->GetGM()) {
MessageString(Chat::SpellFailure, SPELL_NO_HOLD);
return false;
}
}
EQApplicationPacket *action_packet = nullptr, *message_packet = nullptr;
float spell_effectiveness;
if(!IsValidSpell(spell_id))
return false;
// these target types skip pcnpc only check (according to dev quotes)
	// for other AE spells this is redundant, oh well
// 1 = PCs, 2 = NPCs
if (spells[spell_id].pcnpc_only_flag && spells[spell_id].targettype != ST_AETargetHateList &&
spells[spell_id].targettype != ST_HateList) {
if (spells[spell_id].pcnpc_only_flag == 1 && !spelltar->IsClient() && !spelltar->IsMerc() && !spelltar->IsBot())
return false;
else if (spells[spell_id].pcnpc_only_flag == 2 && (spelltar->IsClient() || spelltar->IsMerc() || spelltar->IsBot()))
return false;
}
uint16 caster_level = level_override > 0 ? level_override : GetCasterLevel(spell_id);
LogSpells("Casting spell [{}] on [{}] with effective caster level [{}]", spell_id, spelltar->GetName(), caster_level);
// Actual cast action - this causes the caster animation and the particles
// around the target
// we do this first, that way we get the particles even if the spell
// doesn't land due to pvp protection
// note: this packet is sent again if the spell is successful, with a flag
// set
action_packet = new EQApplicationPacket(OP_Action, sizeof(Action_Struct));
Action_Struct* action = (Action_Struct*) action_packet->pBuffer;
// select source
if(IsClient() && CastToClient()->GMHideMe())
{
action->source = spelltar->GetID();
}
else
{
action->source = GetID();
// this is a hack that makes detrimental buffs work client to client
// TODO figure out how to do this right
if
(
IsDetrimentalSpell(spell_id) &&
IsClient() &&
spelltar->IsClient()
)
{
action->source = spelltar->GetID();
}
}
// select target
if // Bind Sight line of spells
(
spell_id == 500 || // bind sight
spell_id == 407 // cast sight
)
{
action->target = GetID();
}
else
{
action->target = spelltar->GetID();
}
action->spell_level = action->level = caster_level; // caster level, for animation only
action->type = 231; // 231 means a spell
action->spell = spell_id;
action->force = spells[spell_id].pushback;
action->hit_heading = GetHeading();
action->hit_pitch = spells[spell_id].pushup;
action->instrument_mod = GetInstrumentMod(spell_id);
action->effect_flag = 0;
if(spelltar != this && spelltar->IsClient()) // send to target
spelltar->CastToClient()->QueuePacket(action_packet);
if(IsClient()) // send to caster
CastToClient()->QueuePacket(action_packet);
// send to people in the area, ignoring caster and target
entity_list.QueueCloseClients(
spelltar, /* Sender */
action_packet, /* Packet */
true, /* Ignore Sender */
RuleI(Range, SpellMessages),
this, /* Skip this Mob */
true, /* Packet ACK */
(spelltar->IsClient() ? FilterPCSpells : FilterNPCSpells) /* EQ Filter Type: (8 or 9) */
);
/* Send the EVENT_CAST_ON event */
std::string export_string = fmt::format("{}", spell_id);
if(spelltar->IsNPC()) {
parse->EventNPC(EVENT_CAST_ON, spelltar->CastToNPC(), this, export_string, 0);
} else if (spelltar->IsClient()) {
parse->EventPlayer(EVENT_CAST_ON, spelltar->CastToClient(), export_string, 0);
}
mod_spell_cast(spell_id, spelltar, reflect_effectiveness, use_resist_adjust, resist_adjust, isproc);
// now check if the spell is allowed to land
if (RuleB(Spells, EnableBlockedBuffs)) {
// We return true here since the caster's client should act like normal
if (spelltar->IsBlockedBuff(spell_id)) {
LogSpells("Spell [{}] not applied to [{}] as it is a Blocked Buff",
spell_id, spelltar->GetName());
safe_delete(action_packet);
return true;
}
if (spelltar->IsPet() && spelltar->GetOwner() &&
spelltar->GetOwner()->IsBlockedPetBuff(spell_id)) {
LogSpells("Spell [{}] not applied to [{}] ([{}]'s pet) as it is a Pet Blocked Buff",
spell_id, spelltar->GetName(), spelltar->GetOwner()->GetName());
safe_delete(action_packet);
return true;
}
}
// invuln mobs can't be affected by any spells, good or bad
if(spelltar->GetInvul() || spelltar->DivineAura()) {
LogSpells("Casting spell [{}] on [{}] aborted: they are invulnerable", spell_id, spelltar->GetName());
safe_delete(action_packet);
return false;
}
//cannot hurt untargetable mobs
bodyType bt = spelltar->GetBodyType();
if(bt == BT_NoTarget || bt == BT_NoTarget2) {
if (RuleB(Pets, UnTargetableSwarmPet)) {
if (spelltar->IsNPC()) {
if (!spelltar->CastToNPC()->GetSwarmOwner()) {
LogSpells("Casting spell [{}] on [{}] aborted: they are untargetable", spell_id, spelltar->GetName());
safe_delete(action_packet);
return(false);
}
} else {
LogSpells("Casting spell [{}] on [{}] aborted: they are untargetable", spell_id, spelltar->GetName());
safe_delete(action_packet);
return(false);
}
} else {
LogSpells("Casting spell [{}] on [{}] aborted: they are untargetable", spell_id, spelltar->GetName());
safe_delete(action_packet);
return(false);
}
}
	// Prevent double invising, which would make you visible again
	// Not sure if all 3 invis types should stack
if (!RuleB(Spells, AllowDoubleInvis)) {
if (IsEffectInSpell(spell_id, SE_Invisibility))
{
if (spelltar->invisible)
{
spelltar->MessageString(Chat::SpellFailure, ALREADY_INVIS, GetCleanName());
safe_delete(action_packet);
return false;
}
}
if (IsEffectInSpell(spell_id, SE_InvisVsUndead))
{
if (spelltar->invisible_undead)
{
spelltar->MessageString(Chat::SpellFailure, ALREADY_INVIS, GetCleanName());
safe_delete(action_packet);
return false;
}
}
if (IsEffectInSpell(spell_id, SE_InvisVsAnimals))
{
if (spelltar->invisible_animals)
{
spelltar->MessageString(Chat::SpellFailure, ALREADY_INVIS, GetCleanName());
safe_delete(action_packet);
return false;
}
}
}
if(!(IsClient() && CastToClient()->GetGM()) && !IsHarmonySpell(spell_id)) // GMs can cast on anything
{
// Beneficial spells check
if(IsBeneficialSpell(spell_id))
{
if(IsClient() && //let NPCs do beneficial spells on anybody if they want, should be the job of the AI, not the spell code to prevent this from going wrong
spelltar != this)
{
Client* pClient = nullptr;
Raid* pRaid = nullptr;
Group* pBasicGroup = nullptr;
uint32 nGroup = 0; //raid group
Client* pClientTarget = nullptr;
Raid* pRaidTarget = nullptr;
Group* pBasicGroupTarget = nullptr;
uint32 nGroupTarget = 0; //raid group
Client* pClientTargetPet = nullptr;
Raid* pRaidTargetPet = nullptr;
Group* pBasicGroupTargetPet = nullptr;
uint32 nGroupTargetPet = 0; //raid group
				const uint32 cnWTF = 0xFFFFFFFF + 1; // unsigned wrap: this is always 0, used as the "not in a raid group" sentinel below
//Caster client pointers
pClient = this->CastToClient();
pRaid = entity_list.GetRaidByClient(pClient);
pBasicGroup = entity_list.GetGroupByMob(this);
if(pRaid)
nGroup = pRaid->GetGroup(pClient) + 1;
//Target client pointers
if(spelltar->IsClient())
{
pClientTarget = spelltar->CastToClient();
pRaidTarget = entity_list.GetRaidByClient(pClientTarget);
pBasicGroupTarget = entity_list.GetGroupByMob(spelltar);
if(pRaidTarget)
nGroupTarget = pRaidTarget->GetGroup(pClientTarget) + 1;
}
if(spelltar->IsPet())
{
Mob *owner = spelltar->GetOwner();
if(owner->IsClient())
{
pClientTargetPet = owner->CastToClient();
pRaidTargetPet = entity_list.GetRaidByClient(pClientTargetPet);
pBasicGroupTargetPet = entity_list.GetGroupByMob(owner);
if(pRaidTargetPet)
nGroupTargetPet = pRaidTargetPet->GetGroup(pClientTargetPet) + 1;
}
}
if((!IsAllianceSpellLine(spell_id) && !IsBeneficialAllowed(spelltar)) ||
(IsGroupOnlySpell(spell_id) &&
!(
(pBasicGroup && ((pBasicGroup == pBasicGroupTarget) || (pBasicGroup == pBasicGroupTargetPet))) || //Basic Group
((nGroup != cnWTF) && ((nGroup == nGroupTarget) || (nGroup == nGroupTargetPet))) || //Raid group
						(spelltar == GetPet()) //should be able to cast group spells on self and pet regardless of group status.
)
)
)
{
if(spells[spell_id].targettype == ST_AEBard) {
//if it was a beneficial AE bard song don't spam the window that it would not hold
LogSpells("Beneficial ae bard song [{}] can't take hold [{}] -> [{}], IBA? [{}]", spell_id, GetName(), spelltar->GetName(), IsBeneficialAllowed(spelltar));
} else {
LogSpells("Beneficial spell [{}] can't take hold [{}] -> [{}], IBA? [{}]", spell_id, GetName(), spelltar->GetName(), IsBeneficialAllowed(spelltar));
MessageString(Chat::SpellFailure, SPELL_NO_HOLD);
}
safe_delete(action_packet);
return false;
}
}
}
else if ( !IsAttackAllowed(spelltar, true) && !IsResurrectionEffects(spell_id)) // Detrimental spells - PVP check
{
LogSpells("Detrimental spell [{}] can't take hold [{}] -> [{}]", spell_id, GetName(), spelltar->GetName());
spelltar->MessageString(Chat::SpellFailure, YOU_ARE_PROTECTED, GetCleanName());
safe_delete(action_packet);
return false;
}
}
// ok at this point the spell is permitted to affect the target,
// but we need to check special cases and resists
// check immunities
if(spelltar->IsImmuneToSpell(spell_id, this))
{
//the above call does the message to the client if needed
LogSpells("Spell [{}] can't take hold due to immunity [{}] -> [{}]", spell_id, GetName(), spelltar->GetName());
safe_delete(action_packet);
return false;
}
//check for AE_Undead
if(spells[spell_id].targettype == ST_UndeadAE){
if(spelltar->GetBodyType() != BT_SummonedUndead &&
spelltar->GetBodyType() != BT_Undead &&
spelltar->GetBodyType() != BT_Vampire)
{
safe_delete(action_packet);
return false;
}
}
//Need this to account for special AOE cases.
if (IsClient() && IsHarmonySpell(spell_id) && !HarmonySpellLevelCheck(spell_id, spelltar)) {
MessageString(Chat::SpellFailure, SPELL_NO_EFFECT);
safe_delete(action_packet);
return false;
}
	// Block Next Spell effects should be used up first (since they block the next spell cast)
if(CanBlockSpell()) {
int buff_count = GetMaxTotalSlots();
int focus = 0;
for (int b=0; b < buff_count; b++) {
if(IsEffectInSpell(buffs[b].spellid, SE_BlockNextSpellFocus)) {
focus = CalcFocusEffect(focusBlockNextSpell, buffs[b].spellid, spell_id);
if(focus) {
CheckNumHitsRemaining(NumHit::MatchingSpells, b);
MessageString(Chat::SpellFailure, SPELL_WOULDNT_HOLD);
safe_delete(action_packet);
return false;
}
}
}
}
	/*
		Reflect
		base	= % chance to reflect
		Limit	= resist modifier (+value decreases the chance to resist)
		Max		= % of base spell damage (the base before any formula or focus is applied)
		On live, any type of detrimental spell can be reflected as long as the Reflectable spell field is set; this includes AOE.
		The 'caster' of the reflected spell is the owner of the reflect effect. The caster's focus effects are NOT applied to the reflected spell.
		reflect_effectiveness is applied to damage spells; a value of 100 means no change to base damage, other values scale it by percent (50 = 50% of damage).
		We use this variable both to check whether a spell being applied came from a reflection and as the damage modifier.
		A few spells in the database that are not detrimental have the Reflectable field set; testing shows they do not actually reflect.
	*/
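	// Illustrative example (values are spell-data dependent): with REFLECT_DMG_EFFECTIVENESS = 50,
	// a reflected nuke is recast at 50% of its base damage before the new target's own resists apply.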
if(spells[spell_id].reflectable && !reflect_effectiveness && spelltar && this != spelltar && IsDetrimentalSpell(spell_id) &&
(spelltar->spellbonuses.reflect[SBIndex::REFLECT_CHANCE] || spelltar->aabonuses.reflect[SBIndex::REFLECT_CHANCE] || spelltar->itembonuses.reflect[SBIndex::REFLECT_CHANCE])) {
bool can_spell_reflect = false;
switch(RuleI(Spells, ReflectType))
{
case REFLECT_DISABLED:
break;
case REFLECT_SINGLE_TARGET_SPELLS_ONLY:
{
if(spells[spell_id].targettype == ST_Target) {
for(int y = 0; y < 16; y++) {
if (spells[spell_id].classes[y] < 255) {
can_spell_reflect = true;
}
}
}
break;
}
case REFLECT_ALL_PLAYER_SPELLS:
{
for(int y = 0; y < 16; y++) {
if (spells[spell_id].classes[y] < 255) {
can_spell_reflect = true;
}
}
break;
}
case RELFECT_ALL_SINGLE_TARGET_SPELLS:
{
if (spells[spell_id].targettype == ST_Target) {
can_spell_reflect = true;
}
break;
}
			case REFLECT_ALL_SPELLS: //This is live like behavior
				can_spell_reflect = true;
				break;
default:
break;
}
if (can_spell_reflect) {
int reflect_resist_adjust = 0;
int reflect_effectiveness_mod = 0; //Need value of 100 to do baseline unmodified damage.
if (spelltar->spellbonuses.reflect[SBIndex::REFLECT_CHANCE] && zone->random.Roll(spelltar->spellbonuses.reflect[SBIndex::REFLECT_CHANCE])) {
reflect_resist_adjust = spelltar->spellbonuses.reflect[SBIndex::REFLECT_RESISTANCE_MOD];
reflect_effectiveness_mod = spelltar->spellbonuses.reflect[SBIndex::REFLECT_DMG_EFFECTIVENESS] ? spelltar->spellbonuses.reflect[SBIndex::REFLECT_DMG_EFFECTIVENESS] : 100;
}
else if (spelltar->aabonuses.reflect[SBIndex::REFLECT_CHANCE] && zone->random.Roll(spelltar->aabonuses.reflect[SBIndex::REFLECT_CHANCE])) {
reflect_effectiveness_mod = 100;
reflect_resist_adjust = spelltar->aabonuses.reflect[SBIndex::REFLECT_RESISTANCE_MOD];
}
else if (spelltar->itembonuses.reflect[SBIndex::REFLECT_CHANCE] && zone->random.Roll(spelltar->itembonuses.reflect[SBIndex::REFLECT_CHANCE])) {
reflect_resist_adjust = spelltar->itembonuses.reflect[SBIndex::REFLECT_RESISTANCE_MOD];
reflect_effectiveness_mod = spelltar->itembonuses.reflect[SBIndex::REFLECT_DMG_EFFECTIVENESS] ? spelltar->itembonuses.reflect[SBIndex::REFLECT_DMG_EFFECTIVENESS] : 100;
}
if (reflect_effectiveness_mod) {
if (RuleB(Spells, ReflectMessagesClose)) {
entity_list.MessageCloseString(
this, /* Sender */
false, /* Skip Sender */
RuleI(Range, SpellMessages), /* Range */
Chat::Spells, /* Type */
SPELL_REFLECT, /* String ID */
GetCleanName(), /* Message 1 */
spelltar->GetCleanName() /* Message 2 */
);
}
else {
MessageString(Chat::Spells, SPELL_REFLECT, GetCleanName(), spelltar->GetCleanName());
}
CheckNumHitsRemaining(NumHit::ReflectSpell);
spelltar->SpellOnTarget(spell_id, this, reflect_effectiveness_mod, use_resist_adjust, (resist_adjust - reflect_resist_adjust));
safe_delete(action_packet);
return false;
}
}
}
// resist check - every spell can be resisted, beneficial or not
// add: ok this isn't true, eqlive's spell data is fucked up, buffs are
// not all unresistable, so changing this to only check certain spells
if(IsResistableSpell(spell_id))
{
		spelltar->BreakInvisibleSpells(); //Any detrimental spell cast on you will drop invisibility (can be AOE, non-damage, etc.)
if (IsCharmSpell(spell_id) || IsMezSpell(spell_id) || IsFearSpell(spell_id))
spell_effectiveness = spelltar->ResistSpell(spells[spell_id].resisttype, spell_id, this, use_resist_adjust, resist_adjust, true, false, false, level_override);
else
spell_effectiveness = spelltar->ResistSpell(spells[spell_id].resisttype, spell_id, this, use_resist_adjust, resist_adjust, false, false, false, level_override);
if(spell_effectiveness < 100)
{
if(spell_effectiveness == 0 || !IsPartialCapableSpell(spell_id) )
{
LogSpells("Spell [{}] was completely resisted by [{}]", spell_id, spelltar->GetName());
if (spells[spell_id].resisttype == RESIST_PHYSICAL){
MessageString(Chat::SpellFailure, PHYSICAL_RESIST_FAIL,spells[spell_id].name);
spelltar->MessageString(Chat::SpellFailure, YOU_RESIST, spells[spell_id].name);
}
else {
MessageString(Chat::SpellFailure, TARGET_RESISTED, spells[spell_id].name);
spelltar->MessageString(Chat::SpellFailure, YOU_RESIST, spells[spell_id].name);
}
if (spelltar->IsAIControlled()) {
int32 aggro = CheckAggroAmount(spell_id, spelltar);
if (aggro > 0) {
if (!IsHarmonySpell(spell_id))
spelltar->AddToHateList(this, aggro);
else if (!spelltar->PassCharismaCheck(this, spell_id))
spelltar->AddToHateList(this, aggro);
} else {
int newhate = spelltar->GetHateAmount(this) + aggro;
spelltar->SetHateAmountOnEnt(this, std::max(1, newhate));
}
}
if (spelltar->IsClient()){
spelltar->CastToClient()->BreakSneakWhenCastOn(this, true);
spelltar->CastToClient()->BreakFeignDeathWhenCastOn(true);
}
spelltar->CheckNumHitsRemaining(NumHit::IncomingSpells);
CheckNumHitsRemaining(NumHit::OutgoingSpells);
safe_delete(action_packet);
return false;
}
}
if (spelltar->IsClient()){
spelltar->CastToClient()->BreakSneakWhenCastOn(this, false);
spelltar->CastToClient()->BreakFeignDeathWhenCastOn(false);
}
}
else
{
spell_effectiveness = 100;
}
if (spells[spell_id].feedbackable && (spelltar->spellbonuses.SpellDamageShield || spelltar->itembonuses.SpellDamageShield || spelltar->aabonuses.SpellDamageShield)) {
spelltar->DamageShield(this, true);
}
if (spelltar->IsAIControlled() && IsDetrimentalSpell(spell_id) && !IsHarmonySpell(spell_id)) {
int32 aggro_amount = CheckAggroAmount(spell_id, spelltar, isproc);
LogSpells("Spell [{}] cast on [{}] generated [{}] hate", spell_id,
spelltar->GetName(), aggro_amount);
if (aggro_amount > 0) {
spelltar->AddToHateList(this, aggro_amount);
} else {
int32 newhate = spelltar->GetHateAmount(this) + aggro_amount;
spelltar->SetHateAmountOnEnt(this, std::max(newhate, 1));
}
} else if (IsBeneficialSpell(spell_id) && !IsSummonPCSpell(spell_id)) {
if (this != spelltar && IsClient()){
if (spelltar->IsClient()) {
CastToClient()->UpdateRestTimer(spelltar->CastToClient()->GetRestTimer());
}
else if (spelltar->IsPet()) {
Mob *owner = spelltar->GetOwner();
if (owner && owner != this && owner->IsClient()) {
CastToClient()->UpdateRestTimer(owner->CastToClient()->GetRestTimer());
}
}
}
entity_list.AddHealAggro(
spelltar, this,
CheckHealAggroAmount(spell_id, spelltar, (spelltar->GetMaxHP() - spelltar->GetHP())));
}
// make sure spelltar is high enough level for the buff
if(RuleB(Spells, BuffLevelRestrictions) && !spelltar->CheckSpellLevelRestriction(spell_id))
{
LogSpells("Spell [{}] failed: recipient did not meet the level restrictions", spell_id);
if(!IsBardSong(spell_id))
MessageString(Chat::SpellFailure, SPELL_TOO_POWERFUL);
safe_delete(action_packet);
return false;
}
// cause the effects to the target
if(!spelltar->SpellEffect(this, spell_id, spell_effectiveness, level_override, reflect_effectiveness, duration_override))
{
// if SpellEffect returned false there's a problem applying the
// spell. It's most likely a buff that can't stack.
LogSpells("Spell [{}] could not apply its effects [{}] -> [{}]\n", spell_id, GetName(), spelltar->GetName());
if(casting_spell_aa_id)
MessageString(Chat::SpellFailure, SPELL_NO_HOLD);
safe_delete(action_packet);
return false;
}
//Check SE_Fc_Cast_Spell_On_Land SPA 481 on target, if hit by this spell and Conditions are Met then target will cast the specified spell.
if (spelltar)
spelltar->CastSpellOnLand(this, spell_id);
if (IsValidSpell(spells[spell_id].RecourseLink) && spells[spell_id].RecourseLink != spell_id)
SpellFinished(spells[spell_id].RecourseLink, this, CastingSlot::Item, 0, -1, spells[spells[spell_id].RecourseLink].ResistDiff);
if (IsDetrimentalSpell(spell_id)) {
CheckNumHitsRemaining(NumHit::OutgoingSpells);
if (spelltar)
spelltar->CheckNumHitsRemaining(NumHit::IncomingSpells);
}
// send the action packet again now that the spell is successful
// NOTE: this is what causes the buff icon to appear on the client, if
	// this is a buff - but it sort of relies on the first packet.
// the complete sequence is 2 actions and 1 damage message
action->effect_flag = 0x04; // this is a success flag
if(spells[spell_id].pushback != 0.0f || spells[spell_id].pushup != 0.0f)
{
if (spelltar->IsClient())
{
if (!IsBuffSpell(spell_id))
{
spelltar->CastToClient()->cheat_manager.SetExemptStatus(KnockBack, true);
}
}
else if (RuleB(Spells, NPCSpellPush) && !spelltar->IsRooted() && spelltar->ForcedMovement == 0) {
spelltar->m_Delta.x += action->force * g_Math.FastSin(action->hit_heading);
spelltar->m_Delta.y += action->force * g_Math.FastCos(action->hit_heading);
spelltar->m_Delta.z += action->hit_pitch;
spelltar->ForcedMovement = 6;
}
}
if (spelltar->IsClient() && IsEffectInSpell(spell_id, SE_ShadowStep))
{
spelltar->CastToClient()->cheat_manager.SetExemptStatus(ShadowStep, true);
}
if(!IsEffectInSpell(spell_id, SE_BindAffinity))
{
if(spelltar != this && spelltar->IsClient()) // send to target
spelltar->CastToClient()->QueuePacket(action_packet);
if(IsClient()) // send to caster
CastToClient()->QueuePacket(action_packet);
}
// send to people in the area, ignoring caster and target
	//live doesn't send this to anybody but the caster
//entity_list.QueueCloseClients(spelltar, action_packet, true, 200, this, true, spelltar->IsClient() ? FILTER_PCSPELLS : FILTER_NPCSPELLS);
message_packet = new EQApplicationPacket(OP_Damage, sizeof(CombatDamage_Struct));
CombatDamage_Struct *cd = (CombatDamage_Struct *)message_packet->pBuffer;
cd->target = action->target;
cd->source = action->source;
cd->type = action->type;
cd->spellid = action->spell;
cd->force = action->force;
cd->hit_heading = action->hit_heading;
cd->hit_pitch = action->hit_pitch;
cd->damage = 0;
auto spellOwner = GetOwnerOrSelf();
if(!IsEffectInSpell(spell_id, SE_BindAffinity) && !is_damage_or_lifetap_spell){
entity_list.QueueCloseClients(
spelltar, /* Sender */
message_packet, /* Packet */
false, /* Ignore Sender */
RuleI(Range, SpellMessages),
0, /* Skip this mob */
true, /* Packet ACK */
(spelltar->IsClient() ? FilterPCSpells : FilterNPCSpells) /* Message Filter Type: (8 or 9) */
);
} else if (is_damage_or_lifetap_spell && spellOwner->IsClient()) {
spellOwner->CastToClient()->QueuePacket(
message_packet,
true,
Mob::CLIENT_CONNECTINGALL,
(spelltar->IsClient() ? FilterPCSpells : FilterNPCSpells)
);
}
safe_delete(action_packet);
safe_delete(message_packet);
LogSpells("Cast of [{}] by [{}] on [{}] complete successfully", spell_id, GetName(), spelltar->GetName());
return true;
}
void Corpse::CastRezz(uint16 spellid, Mob* Caster)
{
LogSpells("Corpse::CastRezz spellid [{}], Rezzed() is [{}], rezzexp is [{}]", spellid,IsRezzed(),rez_experience);
if(IsRezzed()){
if(Caster && Caster->IsClient())
Caster->Message(Chat::Red,"This character has already been resurrected.");
return;
}
/*
if(!can_rez) {
if(Caster && Caster->IsClient())
Caster->MessageString(Chat::White, CORPSE_TOO_OLD);
return;
}
*/
auto outapp = new EQApplicationPacket(OP_RezzRequest, sizeof(Resurrect_Struct));
Resurrect_Struct* rezz = (Resurrect_Struct*) outapp->pBuffer;
	// Why are we truncating these names to 30 characters?
memcpy(rezz->your_name,this->corpse_name,30);
memcpy(rezz->corpse_name,this->name,30);
memcpy(rezz->rezzer_name,Caster->GetName(),30);
rezz->zone_id = zone->GetZoneID();
rezz->instance_id = zone->GetInstanceID();
rezz->spellid = spellid;
rezz->x = m_Position.x;
rezz->y = m_Position.y;
rezz->z = GetFixedZ(m_Position);
rezz->unknown000 = 0x00000000;
rezz->unknown020 = 0x00000000;
rezz->unknown088 = 0x00000000;
// We send this to world, because it needs to go to the player who may not be in this zone.
worldserver.RezzPlayer(outapp, rez_experience, corpse_db_id, OP_RezzRequest);
safe_delete(outapp);
}
bool Mob::FindBuff(uint16 spellid)
{
	int i;
	int buff_count = GetMaxTotalSlots();
	for(i = 0; i < buff_count; i++)
if(buffs[i].spellid == spellid)
return true;
return false;
}
uint16 Mob::FindBuffBySlot(int slot) {
if (buffs[slot].spellid != SPELL_UNKNOWN)
return buffs[slot].spellid;
return 0;
}
uint32 Mob::BuffCount() {
uint32 active_buff_count = 0;
int buff_count = GetMaxTotalSlots();
for (int i = 0; i < buff_count; i++)
if (buffs[i].spellid != SPELL_UNKNOWN)
active_buff_count++;
return active_buff_count;
}
bool Mob::HasBuffWithSpellGroup(int spellgroup)
{
for (int i = 0; i < GetMaxTotalSlots(); i++) {
if (IsValidSpell(buffs[i].spellid) && spells[buffs[i].spellid].spellgroup == spellgroup) {
return true;
}
}
return false;
}
// removes all buffs
void Mob::BuffFadeAll()
{
int buff_count = GetMaxTotalSlots();
for (int j = 0; j < buff_count; j++) {
if(buffs[j].spellid != SPELL_UNKNOWN)
BuffFadeBySlot(j, false);
}
	//we tell BuffFadeBySlot not to recalc, so we can do it only once when we're done
CalcBonuses();
}
void Mob::BuffFadeNonPersistDeath()
{
int buff_count = GetMaxTotalSlots();
for (int j = 0; j < buff_count; j++) {
if (buffs[j].spellid != SPELL_UNKNOWN && !IsPersistDeathSpell(buffs[j].spellid))
BuffFadeBySlot(j, false);
}
	//we tell BuffFadeBySlot not to recalc, so we can do it only once when we're done
CalcBonuses();
}
void Mob::BuffFadeDetrimental() {
int buff_count = GetMaxTotalSlots();
for (int j = 0; j < buff_count; j++) {
if(buffs[j].spellid != SPELL_UNKNOWN) {
if(IsDetrimentalSpell(buffs[j].spellid))
BuffFadeBySlot(j, false);
}
}
	//we tell BuffFadeBySlot not to recalc, so we can do it only once when we're done
CalcBonuses();
}
void Mob::BuffFadeDetrimentalByCaster(Mob *caster)
{
if(!caster)
return;
int buff_count = GetMaxTotalSlots();
for (int j = 0; j < buff_count; j++) {
if(buffs[j].spellid != SPELL_UNKNOWN) {
if(IsDetrimentalSpell(buffs[j].spellid))
{
//this is a pretty terrible way to do this but
//there really isn't another way till I rewrite the basics
Mob * c = entity_list.GetMob(buffs[j].casterid);
if(c && c == caster)
BuffFadeBySlot(j, false);
}
}
}
	//we tell BuffFadeBySlot not to recalc, so we can do it only once when we're done
CalcBonuses();
}
void Mob::BuffFadeBySitModifier()
{
bool r_bonus = false;
uint32 buff_count = GetMaxTotalSlots();
for(uint32 j = 0; j < buff_count; ++j)
{
if(buffs[j].spellid != SPELL_UNKNOWN)
{
if(spells[buffs[j].spellid].disallow_sit)
{
BuffFadeBySlot(j, false);
r_bonus = true;
}
}
}
if(r_bonus)
{
CalcBonuses();
}
}
// removes the buff matching spell_id
void Mob::BuffFadeBySpellID(uint16 spell_id)
{
int buff_count = GetMaxTotalSlots();
for (int j = 0; j < buff_count; j++)
{
if (buffs[j].spellid == spell_id)
BuffFadeBySlot(j, false);
}
	//we tell BuffFadeBySlot not to recalc, so we can do it only once when we're done
CalcBonuses();
}
void Mob::BuffFadeBySpellIDAndCaster(uint16 spell_id, uint16 caster_id)
{
bool recalc_bonus = false;
auto buff_count = GetMaxTotalSlots();
for (int i = 0; i < buff_count; ++i) {
if (buffs[i].spellid == spell_id && buffs[i].casterid == caster_id) {
BuffFadeBySlot(i, false);
recalc_bonus = true;
}
}
if (recalc_bonus)
CalcBonuses();
}
// removes buffs containing effectid, skipping skipslot
void Mob::BuffFadeByEffect(int effectid, int skipslot)
{
int i;
int buff_count = GetMaxTotalSlots();
for(i = 0; i < buff_count; i++)
{
if(buffs[i].spellid == SPELL_UNKNOWN)
continue;
if(IsEffectInSpell(buffs[i].spellid, effectid) && i != skipslot)
BuffFadeBySlot(i, false);
}
	//we tell BuffFadeBySlot not to recalc, so we can do it only once when we're done
CalcBonuses();
}
bool Mob::IsAffectedByBuff(uint16 spell_id)
{
int buff_count = GetMaxTotalSlots();
for (int i = 0; i < buff_count; ++i)
if (buffs[i].spellid == spell_id)
return true;
return false;
}
bool Mob::IsAffectedByBuffByGlobalGroup(GlobalGroup group)
{
int buff_count = GetMaxTotalSlots();
for (int i = 0; i < buff_count; ++i) {
if (buffs[i].spellid == SPELL_UNKNOWN)
continue;
if (spells[buffs[i].spellid].spell_category == static_cast<int>(group))
return true;
}
return false;
}
// checks if 'this' can be affected by spell_id from caster
// returns true if the spell should fail, false otherwise
bool Mob::IsImmuneToSpell(uint16 spell_id, Mob *caster)
{
int effect_index;
if(caster == nullptr)
return(false);
//TODO: this function loops through the effect list for
//this spell like 10 times, this could easily be consolidated
//into one loop through with a switch statement.
LogSpells("Checking to see if we are immune to spell [{}] cast by [{}]", spell_id, caster->GetName());
if(!IsValidSpell(spell_id))
return true;
if(IsBeneficialSpell(spell_id) && (caster->GetNPCTypeID())) //then skip the rest, stop NPCs aggroing each other with buff spells. 2013-03-05
return false;
if(IsMezSpell(spell_id))
{
if(GetSpecialAbility(UNMEZABLE)) {
LogSpells("We are immune to Mez spells");
caster->MessageString(Chat::SpellFailure, CANNOT_MEZ);
int32 aggro = caster->CheckAggroAmount(spell_id, this);
if(aggro > 0) {
AddToHateList(caster, aggro);
} else {
AddToHateList(caster, 1,0,true,false,false,spell_id);
}
return true;
}
// check max level for spell
effect_index = GetSpellEffectIndex(spell_id, SE_Mez);
assert(effect_index >= 0);
// NPCs get to ignore the max level
if((GetLevel() > spells[spell_id].max[effect_index]) &&
(!caster->IsNPC() || (caster->IsNPC() && !RuleB(Spells, NPCIgnoreBaseImmunity))))
{
LogSpells("Our level ([{}]) is higher than the limit of this Mez spell ([{}])", GetLevel(), spells[spell_id].max[effect_index]);
caster->MessageString(Chat::SpellFailure, CANNOT_MEZ_WITH_SPELL);
AddToHateList(caster, 1,0,true,false,false,spell_id);
return true;
}
}
// slow and haste spells
if(GetSpecialAbility(UNSLOWABLE) && IsEffectInSpell(spell_id, SE_AttackSpeed))
{
LogSpells("We are immune to Slow spells");
caster->MessageString(Chat::Red, IMMUNE_ATKSPEED);
int32 aggro = caster->CheckAggroAmount(spell_id, this);
if(aggro > 0) {
AddToHateList(caster, aggro);
} else {
AddToHateList(caster, 1,0,true,false,false,spell_id);
}
return true;
}
// client vs client fear
if(IsEffectInSpell(spell_id, SE_Fear))
{
effect_index = GetSpellEffectIndex(spell_id, SE_Fear);
if(GetSpecialAbility(UNFEARABLE)) {
LogSpells("We are immune to Fear spells");
caster->MessageString(Chat::Red, IMMUNE_FEAR); // need to verify message type, not in MQ2Cast for easy look up
int32 aggro = caster->CheckAggroAmount(spell_id, this);
if(aggro > 0) {
AddToHateList(caster, aggro);
} else {
AddToHateList(caster, 1,0,true,false,false,spell_id);
}
return true;
} else if(IsClient() && caster->IsClient() && (caster->CastToClient()->GetGM() == false))
{
LogSpells("Clients cannot fear eachother!");
caster->MessageString(Chat::Red, IMMUNE_FEAR); // need to verify message type, not in MQ2Cast for easy look up
return true;
}
else if(GetLevel() > spells[spell_id].max[effect_index] && spells[spell_id].max[effect_index] != 0)
{
LogSpells("Level is [{}], cannot be feared by this spell", GetLevel());
caster->MessageString(Chat::Shout, FEAR_TOO_HIGH);
int32 aggro = caster->CheckAggroAmount(spell_id, this);
if (aggro > 0) {
AddToHateList(caster, aggro);
} else {
AddToHateList(caster, 1,0,true,false,false,spell_id);
}
return true;
}
else if (CheckAATimer(aaTimerWarcry))
{
Message(Chat::Red, "Your are immune to fear.");
LogSpells("Clients has WarCry effect, immune to fear!");
caster->MessageString(Chat::Red, IMMUNE_FEAR); // need to verify message type, not in MQ2Cast for easy look up
return true;
}
}
if(IsCharmSpell(spell_id))
{
if(GetSpecialAbility(UNCHARMABLE))
{
LogSpells("We are immune to Charm spells");
caster->MessageString(Chat::Red, CANNOT_CHARM); // need to verify message type, not in MQ2Cast for easy look up
int32 aggro = caster->CheckAggroAmount(spell_id, this);
if(aggro > 0) {
AddToHateList(caster, aggro);
} else {
AddToHateList(caster, 1,0,true,false,false,spell_id);
}
return true;
}
if(this == caster)
{
LogSpells("You are immune to your own charms");
caster->Message(Chat::Red, "You cannot charm yourself."); // need to look up message?
return true;
}
//let npcs cast whatever charm on anyone
if(!caster->IsNPC())
{
// check level limit of charm spell
effect_index = GetSpellEffectIndex(spell_id, SE_Charm);
assert(effect_index >= 0);
if(GetLevel() > spells[spell_id].max[effect_index] && spells[spell_id].max[effect_index] != 0)
{
LogSpells("Our level ([{}]) is higher than the limit of this Charm spell ([{}])", GetLevel(), spells[spell_id].max[effect_index]);
				caster->MessageString(Chat::Red, CANNOT_CHARM_YET); // need to verify message type, not in MQ2Cast for easy look up
AddToHateList(caster, 1,0,true,false,false,spell_id);
return true;
}
}
}
if
(
IsEffectInSpell(spell_id, SE_Root) ||
IsEffectInSpell(spell_id, SE_MovementSpeed)
)
{
if(GetSpecialAbility(UNSNAREABLE)) {
LogSpells("We are immune to Snare spells");
caster->MessageString(Chat::Red, IMMUNE_MOVEMENT);
int32 aggro = caster->CheckAggroAmount(spell_id, this);
if(aggro > 0) {
AddToHateList(caster, aggro);
} else {
AddToHateList(caster, 1,0,true,false,false,spell_id);
}
return true;
}
}
if(IsLifetapSpell(spell_id))
{
if(this == caster)
{
LogSpells("You cannot lifetap yourself");
caster->MessageString(Chat::SpellFailure, CANT_DRAIN_SELF);
return true;
}
}
if(IsSacrificeSpell(spell_id))
{
if(this == caster)
{
LogSpells("You cannot sacrifice yourself");
caster->MessageString(Chat::SpellFailure, CANNOT_SAC_SELF);
return true;
}
}
LogSpells("No immunities to spell [{}] found", spell_id);
return false;
}
int Mob::GetResist(uint8 resist_type)
{
switch(resist_type)
{
case RESIST_FIRE:
return GetFR();
case RESIST_COLD:
return GetCR();
case RESIST_MAGIC:
return GetMR();
case RESIST_DISEASE:
return GetDR();
case RESIST_POISON:
return GetPR();
case RESIST_CORRUPTION:
return GetCorrup();
case RESIST_PRISMATIC:
return (GetFR() + GetCR() + GetMR() + GetDR() + GetPR()) / 5;
case RESIST_CHROMATIC:
return std::min({GetFR(), GetCR(), GetMR(), GetDR(), GetPR()});
case RESIST_PHYSICAL:
if (IsNPC())
return GetPhR();
else
return 0;
default:
return 0;
}
}
//
// Spell resists:
// returns an effectiveness index from 0 to 100. for most spells, 100 means
// it landed, and anything else means it was resisted; however there are some
// spells that can be partially effective, and this value can be used there.
// TODO: we need to figure out how the following pvp values work and implement them
// pvp_resist_base
// pvp_resist_calc
// pvp_resist_cap
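// Example: a return of 100 means the spell landed fully and 0 means it was fully
// resisted; a partial-capable spell returning 40 applies roughly 40% of its effect.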
float Mob::ResistSpell(uint8 resist_type, uint16 spell_id, Mob *caster, bool use_resist_override, int resist_override, bool CharismaCheck, bool CharmTick, bool IsRoot, int level_override)
{
if(!caster)
{
return 100;
}
if(spell_id != 0 && !IsValidSpell(spell_id))
{
return 0;
}
if(GetSpecialAbility(IMMUNE_CASTING_FROM_RANGE))
{
if(!caster->CombatRange(this))
{
return(0);
}
}
if(GetSpecialAbility(IMMUNE_MAGIC))
{
LogSpells("We are immune to magic, so we fully resist the spell [{}]", spell_id);
return(0);
}
	//Get resist modifier and adjust it based on focus (2 points of resist modifier roughly equals 1% resist chance)
int resist_modifier = 0;
if (use_resist_override) {
resist_modifier = resist_override;
} else {
		// PVP: the per-level and cap handling (pvp_resist_calc / pvp_resist_cap) is not
		// implemented yet; only pvp_resist_base is used here. TODO: implement the rest.
if (IsClient() && caster->IsClient()) {
resist_modifier = spells[spell_id].pvpresistbase;
} else {
resist_modifier = spells[spell_id].ResistDiff;
}
}
if(caster->GetSpecialAbility(CASTING_RESIST_DIFF))
resist_modifier += caster->GetSpecialAbilityParam(CASTING_RESIST_DIFF, 0);
int focus_resist = caster->GetFocusEffect(focusResistRate, spell_id);
resist_modifier -= 2 * focus_resist;
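	// e.g. a focusResistRate effect of 20 lowers the modifier by 40; since the final
	// roll is 0-200, each point is ~0.5%, so that's roughly 20% more likely to land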
int focus_incoming_resist = GetFocusEffect(focusFcResistIncoming, spell_id);
resist_modifier -= focus_incoming_resist;
//Check for fear resist
bool IsFear = false;
if(IsFearSpell(spell_id))
{
IsFear = true;
int fear_resist_bonuses = CalcFearResistChance();
if(zone->random.Roll(fear_resist_bonuses))
{
LogSpells("Resisted spell in fear resistance, had [{}] chance to resist", fear_resist_bonuses);
return 0;
}
}
if (!CharmTick){
//Check for Spell Effect specific resistance chances (ie AA Mental Fortitude)
int se_resist_bonuses = GetSpellEffectResistChance(spell_id);
if(se_resist_bonuses && zone->random.Roll(se_resist_bonuses))
{
return 0;
}
// Check for Chance to Resist Spell bonuses (ie Sanctification Discipline)
int resist_bonuses = CalcResistChanceBonus();
if(resist_bonuses && zone->random.Roll(resist_bonuses))
{
LogSpells("Resisted spell in sanctification, had [{}] chance to resist", resist_bonuses);
return 0;
}
}
//Get the resist chance for the target
if(resist_type == RESIST_NONE || spells[spell_id].no_resist)
{
LogSpells("Spell was unresistable");
return 100;
}
int target_resist = GetResist(resist_type);
// JULY 24, 2002 changes
int level = GetLevel();
if (RuleB(Spells,July242002PetResists) && IsPetOwnerClient() && caster->IsNPC() && !caster->IsPetOwnerClient()) {
auto owner = GetOwner();
if (owner != nullptr) {
target_resist = std::max(target_resist, owner->GetResist(resist_type));
level = owner->GetLevel();
}
}
//Setup our base resist chance.
int resist_chance = 0;
int level_mod = 0;
//Adjust our resist chance based on level modifiers
uint8 caster_level = level_override > 0 ? level_override : caster->GetLevel();
int temp_level_diff = level - caster_level;
	//Physical resists are calculated using their own formula derived from extensive parsing.
if (resist_type == RESIST_PHYSICAL) {
level_mod = ResistPhysical(temp_level_diff, caster_level);
}
else {
if(IsNPC() && level >= RuleI(Casting,ResistFalloff))
{
int a = (RuleI(Casting,ResistFalloff)-1) - caster_level;
if(a > 0)
{
temp_level_diff = a;
}
else
{
temp_level_diff = 0;
}
}
if(IsClient() && level >= 21 && temp_level_diff > 15)
{
temp_level_diff = 15;
}
if(IsNPC() && temp_level_diff < -9)
{
temp_level_diff = -9;
}
level_mod = temp_level_diff * temp_level_diff / 2;
if(temp_level_diff < 0)
{
level_mod = -level_mod;
}
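		// e.g. a +10 level difference gives 10*10/2 = +50 resist chance,
		// while a -4 difference gives -(4*4/2) = -8 (easier to land on lower-level targets)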
if(IsNPC() && (caster_level - level) < -20)
{
level_mod = 1000;
}
		//Additional level-based adjustment, this time for damage spells
if(IsNPC() && IsDamageSpell(spell_id) && level >= 17)
{
int level_diff;
if(level >= RuleI(Casting,ResistFalloff))
{
level_diff = (RuleI(Casting,ResistFalloff)-1) - caster_level;
if(level_diff < 0)
{
level_diff = 0;
}
}
else
{
level_diff = level - caster_level;
}
level_mod += (2 * level_diff);
}
}
if (CharismaCheck)
{
		/*
		Charisma ONLY affects the initial resist check when charm is cast: 10 CHA = -1 resist mod, up to 255 CHA (bonus starts at ~75 CHA).
		Charisma below ~75 gives a positive modifier to resist checks at an approximate ratio of -10 CHA = +6 resist.
		Mez spells do the same initial resist check as above.
		Lull spells only check charisma if the initial cast is resisted, to see if the mob will aggro; same modifier/cap as above.
		Charisma DOES NOT extend charm durations.
		Fear resist chance is given a -20 resist modifier if CHA is < 100; from 100-255 the negative mod is progressively reduced to 0.
		Fears versus undead DO NOT apply a charisma modifier. (Note: unknown Base1 values defined in undead fears do not affect duration.)
		*/
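		// Illustrative example (rule values are server-configurable): with 200 CHA and a
		// CharismaEffectiveness of 10, the resist modifier drops by (200 - 75) / 10 = 12,
		// making the charm/mez harder for the target to resist.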
int16 charisma = caster->GetCHA();
if (IsFear && (spells[spell_id].targettype != ST_Undead)){
if (charisma < 100)
resist_modifier -= 20;
else if (charisma <= 255)
resist_modifier += (charisma - 100)/8;
}
else {
if (charisma >= 75){
if (charisma > RuleI(Spells, CharismaEffectivenessCap))
charisma = RuleI(Spells, CharismaEffectivenessCap);
resist_modifier -= (charisma - 75)/RuleI(Spells, CharismaEffectiveness);
}
else
resist_modifier += ((75 - charisma)/10) * 6; //Increase Resist Chance
}
}
//Lull spells DO NOT use regular resists on initial cast, instead they use a flat +15 modifier. Live parses confirm this.
//Regular resists are used when checking if mob will aggro off of a lull resist.
if(!CharismaCheck && IsHarmonySpell(spell_id))
target_resist = 15;
//Add our level, resist and -spell resist modifier to our roll chance
resist_chance += level_mod;
resist_chance += resist_modifier;
resist_chance += target_resist;
resist_chance = mod_spell_resist(resist_chance, level_mod, resist_modifier, target_resist, resist_type, spell_id, caster);
//Do our min and max resist checks.
if(resist_chance > spells[spell_id].MaxResist && spells[spell_id].MaxResist != 0)
{
resist_chance = spells[spell_id].MaxResist;
}
if(resist_chance < spells[spell_id].MinResist && spells[spell_id].MinResist != 0)
{
resist_chance = spells[spell_id].MinResist;
}
	//Average charm duration against mobs with 0% chance to resist on LIVE is ~ 68 ticks.
	//Minimum resist chance should be calculated factoring in RuleI(Spells, CharmBreakCheckChance)
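	// Illustrative: with a hypothetical CharmBreakCheckChance of 150, the floor is
	// ((100/150)/66*100)*2 ~= 2.02, so even 0%-resist targets occasionally break charm.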
if (CharmTick) {
float min_charmbreakchance = ((100.0f/static_cast<float>(RuleI(Spells, CharmBreakCheckChance)))/66.0f * 100.0f)*2.0f;
if (resist_chance < static_cast<int>(min_charmbreakchance))
resist_chance = min_charmbreakchance;
}
	//Average root duration against mobs with 0% chance to resist on LIVE is ~ 22 ticks (6% resist chance).
	//Minimum resist chance should be calculated factoring in RuleI(Spells, RootBreakCheckChance)
if (IsRoot) {
float min_rootbreakchance = ((100.0f/static_cast<float>(RuleI(Spells, RootBreakCheckChance)))/22.0f * 100.0f)*2.0f;
if (resist_chance < static_cast<int>(min_rootbreakchance))
resist_chance = min_rootbreakchance;
}
//Finally our roll
int roll = zone->random.Int(0, 200);
if(roll > resist_chance)
{
return 100;
}
else
{
//This is confusing but it's basically right
//It skews partial resists up over 100 more often than not
if(!IsPartialCapableSpell(spell_id))
{
return 0;
}
else
{
if(resist_chance < 1)
{
resist_chance = 1;
}
int partial_modifier = ((150 * (resist_chance - roll)) / resist_chance);
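				// e.g. resist_chance = 100 and roll = 50 gives 150*(100-50)/100 = 75,
				// so (before the NPC level adjustments below) the spell lands at 100 - 75 = 25% effectiveness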
if(IsNPC())
{
if(level > caster_level && level >= 17 && caster_level <= 50)
{
partial_modifier += 5;
}
if(level >= 30 && caster_level < 50)
{
partial_modifier += (caster_level - 25);
}
if(level < 15)
{
partial_modifier -= 5;
}
}
if(caster->IsNPC())
{
if((level - caster_level) >= 20)
{
partial_modifier += (level - caster_level) * 1.5;
}
}
if(partial_modifier <= 0)
{
return 100;
}
else if(partial_modifier >= 100)
{
return 0;
}
return (100.0f - partial_modifier);
}
}
}
int Mob::ResistPhysical(int level_diff, uint8 caster_level)
{
	/* Physical resists use the standard level mod calculation in
	conjunction with a resist fall-off formula that greatly prevents you
	from landing abilities on mobs that are higher level than you.
	After level 12, every 4 caster levels gained raises by 1 the max level
	at which you can hit your target without a sharp resist penalty.
	Extensive parsing confirms this, along with the baseline physical resist rates used.
	*/
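	// Illustrative: a level 20 caster has a fall-off of 20/4 = 5, so a +6 level difference
	// takes the steep branch: ((6*10)+6)*2 = +132 instead of the standard 6*6/2 = +18.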
if (level_diff == 0)
return level_diff;
int level_mod = 0;
if (level_diff > 0) {
int ResistFallOff = 0;
if (caster_level <= 12)
ResistFallOff = 3;
else
ResistFallOff = caster_level/4;
if (level_diff > ResistFallOff || level_diff >= 15)
level_mod = ((level_diff * 10) + level_diff)*2;
else
level_mod = level_diff * level_diff / 2;
}
else
level_mod = -(level_diff * level_diff / 2);
return level_mod;
}
int16 Mob::CalcResistChanceBonus()
{
int resistchance = spellbonuses.ResistSpellChance + itembonuses.ResistSpellChance;
if(IsClient())
resistchance += aabonuses.ResistSpellChance;
return resistchance;
}
int16 Mob::CalcFearResistChance()
{
int resistchance = spellbonuses.ResistFearChance + itembonuses.ResistFearChance;
if(this->IsClient()) {
resistchance += aabonuses.ResistFearChance;
if(aabonuses.Fearless == true)
resistchance = 100;
}
if(spellbonuses.Fearless == true || itembonuses.Fearless == true)
resistchance = 100;
return resistchance;
}
/**
 * @param spell_id the spell whose AE range is being calculated
 * @return the effective AE range after song range bonuses and spell range focus are applied
 */
float Mob::GetAOERange(uint16 spell_id)
{
float range = spells[spell_id].aoerange;
/**
* For TGB
*/
if (range == 0) {
range = spells[spell_id].range;
}
if (range == 0) {
range = 10;
}
if (IsBardSong(spell_id) && IsBeneficialSpell(spell_id)) {
//Live AA - Extended Notes, SionachiesCrescendo
float song_bonus = static_cast<float>(aabonuses.SongRange + spellbonuses.SongRange + itembonuses.SongRange);
range += range * song_bonus / 100.0f;
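		// e.g. a 60-unit song with a combined +25 SongRange bonus reaches 60 + 60*25/100 = 75 units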
}
return GetActSpellRange(spell_id, range);
}
///////////////////////////////////////////////////////////////////////////////
// 'other' functions
void Mob::Spin() {
if(IsClient()) {
auto outapp = new EQApplicationPacket(OP_Action, sizeof(Action_Struct));
outapp->pBuffer[0] = 0x0B;
outapp->pBuffer[1] = 0x0A;
outapp->pBuffer[2] = 0x0B;
outapp->pBuffer[3] = 0x0A;
outapp->pBuffer[4] = 0xE7;
outapp->pBuffer[5] = 0x00;
outapp->pBuffer[6] = 0x4D;
outapp->pBuffer[7] = 0x04;
outapp->pBuffer[8] = 0x00;
outapp->pBuffer[9] = 0x00;
outapp->pBuffer[10] = 0x00;
outapp->pBuffer[11] = 0x00;
outapp->pBuffer[12] = 0x00;
outapp->pBuffer[13] = 0x00;
outapp->pBuffer[14] = 0x00;
outapp->pBuffer[15] = 0x00;
outapp->pBuffer[16] = 0x00;
outapp->pBuffer[17] = 0x00;
outapp->pBuffer[18] = 0xD4;
outapp->pBuffer[19] = 0x43;
outapp->pBuffer[20] = 0x00;
outapp->pBuffer[21] = 0x00;
outapp->pBuffer[22] = 0x00;
outapp->priority = 5;
CastToClient()->QueuePacket(outapp);
safe_delete(outapp);
}
else {
float x,y,z,h;
x=GetX();
y=GetY();
z=GetZ();
h=GetHeading()+5;
if (IsCorpse() || (IsClient() && !IsAIControlled())) {
m_Position.x = x;
m_Position.y = y;
m_Position.z = z;
mMovementManager->SendCommandToClients(this, 0.0, 0.0, 0.0, 0.0, 0, ClientRangeAny);
}
else {
Teleport(glm::vec4(x, y, z, h));
}
}
}
void Mob::SendSpellBarDisable()
{
if (!IsClient())
return;
CastToClient()->MemorizeSpell(0, SPELLBAR_UNLOCK, memSpellSpellbar);
}
// this puts the spell bar back into a usable state fast
void Mob::SendSpellBarEnable(uint16 spell_id)
{
if(!IsClient())
return;
auto outapp = new EQApplicationPacket(OP_ManaChange, sizeof(ManaChange_Struct));
ManaChange_Struct* manachange = (ManaChange_Struct*)outapp->pBuffer;
manachange->new_mana = GetMana();
manachange->spell_id = spell_id;
manachange->stamina = CastToClient()->GetEndurance();
manachange->keepcasting = 0;
outapp->priority = 6;
CastToClient()->QueuePacket(outapp);
safe_delete(outapp);
}
void Mob::Stun(int duration)
{
//make sure a shorter stun does not overwrite a longer one.
if(stunned && stunned_timer.GetRemainingTime() > uint32(duration))
return;
auto spell_id = bardsong ? bardsong : casting_spell_id;
if(IsValidSpell(spell_id) && !spells[spell_id].uninterruptable) {
int persistent_casting = spellbonuses.PersistantCasting + itembonuses.PersistantCasting + aabonuses.PersistantCasting;
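		// persistent_casting acts as a percent chance (rolled 0-99) to keep casting through the stun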
if(zone->random.Int(0,99) > persistent_casting)
InterruptSpell(spell_id);
}
if(duration > 0)
{
stunned = true;
stunned_timer.Start(duration);
SendAddPlayerState(PlayerState::Stunned);
}
}
void Mob::UnStun() {
if(stunned && stunned_timer.Enabled()) {
stunned = false;
stunned_timer.Disable();
SendRemovePlayerState(PlayerState::Stunned);
}
}
// Stuns "this"
void Client::Stun(int duration)
{
Mob::Stun(duration);
auto outapp = new EQApplicationPacket(OP_Stun, sizeof(Stun_Struct));
Stun_Struct* stunon = (Stun_Struct*) outapp->pBuffer;
stunon->duration = duration;
outapp->priority = 5;
QueuePacket(outapp);
safe_delete(outapp);
}
void Client::UnStun() {
Mob::UnStun();
auto outapp = new EQApplicationPacket(OP_Stun, sizeof(Stun_Struct));
Stun_Struct* stunon = (Stun_Struct*) outapp->pBuffer;
stunon->duration = 0;
outapp->priority = 5;
QueuePacket(outapp);
safe_delete(outapp);
}
void NPC::Stun(int duration) {
Mob::Stun(duration);
StopNavigation();
}
void NPC::UnStun() {
Mob::UnStun();
}
void Mob::Mesmerize()
{
mezzed = true;
auto spell_id = bardsong ? bardsong : casting_spell_id;
if (spell_id)
InterruptSpell(spell_id);
StopNavigation();
}
void Client::MakeBuffFadePacket(uint16 spell_id, int slot_id, bool send_message)
{
EQApplicationPacket* outapp = nullptr;
outapp = new EQApplicationPacket(OP_Buff, sizeof(SpellBuffPacket_Struct));
SpellBuffPacket_Struct* sbf = (SpellBuffPacket_Struct*) outapp->pBuffer;
sbf->entityid = GetID();
	// I don't know why, but this works... for now
sbf->buff.effect_type = 2;
// sbf->slot=m_pp.buffs[slot_id].slotid;
// sbf->level=m_pp.buffs[slot_id].level;
// sbf->effect=m_pp.buffs[slot_id].effect;
sbf->buff.spellid = spell_id;
sbf->slotid = slot_id;
sbf->bufffade = 1;
#if EQDEBUG >= 11
printf("Sending SBF 1 from server:\n");
DumpPacket(outapp);
#endif
QueuePacket(outapp);
/*
sbf->effect=0;
sbf->level=0;
sbf->slot=0;
*/
sbf->buff.spellid = 0xffffffff;
#if EQDEBUG >= 11
printf("Sending SBF 2 from server:\n");
DumpPacket(outapp);
#endif
QueuePacket(outapp);
safe_delete(outapp);
if(send_message)
{
const char *fadetext = spells[spell_id].spell_fades;
outapp = new EQApplicationPacket(OP_ColoredText, sizeof(ColoredText_Struct) + strlen(fadetext));
ColoredText_Struct *bfm = (ColoredText_Struct *) outapp->pBuffer;
bfm->color = Chat::Spells;
memcpy(bfm->msg, fadetext, strlen(fadetext));
QueuePacket(outapp);
safe_delete(outapp);
}
}
void Client::MemSpell(uint16 spell_id, int slot, bool update_client)
{
if(slot >= EQ::spells::SPELL_GEM_COUNT || slot < 0)
return;
if(update_client)
{
if(m_pp.mem_spells[slot] != 0xFFFFFFFF)
UnmemSpell(slot, update_client);
}
m_pp.mem_spells[slot] = spell_id;
LogSpells("Spell [{}] memorized into slot [{}]", spell_id, slot);
database.SaveCharacterMemorizedSpell(this->CharacterID(), m_pp.mem_spells[slot], slot);
if(update_client)
{
MemorizeSpell(slot, spell_id, memSpellMemorize);
}
}
void Client::UnmemSpell(int slot, bool update_client)
{
	if(slot >= EQ::spells::SPELL_GEM_COUNT || slot < 0) // valid gem slots are 0 .. SPELL_GEM_COUNT-1
return;
LogSpells("Spell [{}] forgotten from slot [{}]", m_pp.mem_spells[slot], slot);
m_pp.mem_spells[slot] = 0xFFFFFFFF;
database.DeleteCharacterMemorizedSpell(this->CharacterID(), m_pp.mem_spells[slot], slot);
if(update_client)
{
MemorizeSpell(slot, m_pp.mem_spells[slot], memSpellForget);
}
}
void Client::UnmemSpellBySpellID(int32 spell_id)
{
for(int i = 0; i < EQ::spells::SPELL_GEM_COUNT; i++) {
if(m_pp.mem_spells[i] == spell_id) {
UnmemSpell(i, true);
break;
}
}
}
void Client::UnmemSpellAll(bool update_client)
{
int i;
for(i = 0; i < EQ::spells::SPELL_GEM_COUNT; i++)
if(m_pp.mem_spells[i] != 0xFFFFFFFF)
UnmemSpell(i, update_client);
}
uint32 Client::GetSpellIDByBookSlot(int book_slot) {
	if (book_slot < EQ::spells::SPELLBOOK_SIZE) { // valid book slots are 0 .. SPELLBOOK_SIZE-1
return GetSpellByBookSlot(book_slot);
}
return -1; //default
}
uint16 Client::FindMemmedSpellBySlot(int slot) {
if (m_pp.mem_spells[slot] != 0xFFFFFFFF)
return m_pp.mem_spells[slot];
return 0;
}
int Client::MemmedCount() {
int memmed_count = 0;
for (int i = 0; i < EQ::spells::SPELL_GEM_COUNT; i++)
if (m_pp.mem_spells[i] != 0xFFFFFFFF)
memmed_count++;
return memmed_count;
}
void Client::ScribeSpell(uint16 spell_id, int slot, bool update_client, bool defer_save)
{
if (slot >= EQ::spells::SPELLBOOK_SIZE || slot < 0) {
return;
}
if (update_client) {
if (m_pp.spell_book[slot] != 0xFFFFFFFF) {
UnscribeSpell(slot, update_client, defer_save);
}
}
m_pp.spell_book[slot] = spell_id;
// defer save if we're bulk saving elsewhere
if (!defer_save) {
database.SaveCharacterSpell(this->CharacterID(), spell_id, slot);
}
LogSpells("Spell [{}] scribed into spell book slot [{}]", spell_id, slot);
if (update_client) {
MemorizeSpell(slot, spell_id, memSpellScribing);
}
}
void Client::UnscribeSpell(int slot, bool update_client, bool defer_save)
{
if (slot >= EQ::spells::SPELLBOOK_SIZE || slot < 0) {
return;
}
LogSpells("Spell [{}] erased from spell book slot [{}]", m_pp.spell_book[slot], slot);
m_pp.spell_book[slot] = 0xFFFFFFFF;
if (!defer_save) {
database.DeleteCharacterSpell(this->CharacterID(), m_pp.spell_book[slot], slot);
}
if (update_client && slot < EQ::spells::DynamicLookup(ClientVersion(), GetGM())->SpellbookSize) {
auto outapp = new EQApplicationPacket(OP_DeleteSpell, sizeof(DeleteSpell_Struct));
DeleteSpell_Struct *del = (DeleteSpell_Struct *) outapp->pBuffer;
del->spell_slot = slot;
del->success = 1;
QueuePacket(outapp);
safe_delete(outapp);
}
}
void Client::UnscribeSpellAll(bool update_client)
{
for (int i = 0; i < EQ::spells::SPELLBOOK_SIZE; i++) {
if (m_pp.spell_book[i] != 0xFFFFFFFF) {
UnscribeSpell(i, update_client, true);
}
}
// bulk save at end (this will only delete)
SaveSpells();
}
void Client::UntrainDisc(int slot, bool update_client, bool defer_save)
{
if (slot >= MAX_PP_DISCIPLINES || slot < 0) {
return;
}
LogSpells("Discipline [{}] untrained from slot [{}]", m_pp.disciplines.values[slot], slot);
m_pp.disciplines.values[slot] = 0;
if (!defer_save) {
database.DeleteCharacterDisc(this->CharacterID(), slot);
}
if (update_client) {
SendDisciplineUpdate();
}
}
void Client::UntrainDiscAll(bool update_client)
{
for (int i = 0; i < MAX_PP_DISCIPLINES; i++) {
if (m_pp.disciplines.values[i] != 0) {
UntrainDisc(i, update_client, true);
}
}
// bulk delete / save
SaveDisciplines();
}
void Client::UntrainDiscBySpellID(uint16 spell_id, bool update_client)
{
for (int slot = 0; slot < MAX_PP_DISCIPLINES; slot++) {
if (m_pp.disciplines.values[slot] == spell_id) {
UntrainDisc(slot, update_client);
return;
}
}
}
int Client::GetNextAvailableSpellBookSlot(int starting_slot) {
for (int i = starting_slot; i < EQ::spells::SPELLBOOK_SIZE; i++) { //using starting_slot should help speed this up when we're iterating through a bunch of spells
if (!IsValidSpell(GetSpellByBookSlot(i)))
return i;
}
return -1; //default
}
int Client::FindSpellBookSlotBySpellID(uint16 spellid) {
for(int i = 0; i < EQ::spells::SPELLBOOK_SIZE; i++) {
if(m_pp.spell_book[i] == spellid)
return i;
}
return -1; //default
}
uint32 Client::GetHighestScribedSpellinSpellGroup(uint32 spell_group)
{
//Typical live spells follow 1/5/10 rank value for actual ranks 1/2/3, but this can technically be set as anything.
int highest_rank = 0; //highest ranked found in spellgroup
uint32 highest_spell_id = 0; //spell_id of the highest ranked spell you have scribed in that spell rank.
for (int i = 0; i < EQ::spells::SPELLBOOK_SIZE; i++) {
if (IsValidSpell(m_pp.spell_book[i])) {
if (spells[m_pp.spell_book[i]].spellgroup == spell_group) {
if (highest_rank < spells[m_pp.spell_book[i]].rank) {
highest_rank = spells[m_pp.spell_book[i]].rank;
highest_spell_id = m_pp.spell_book[i];
}
}
}
}
return highest_spell_id;
}
bool Client::SpellGlobalCheck(uint16 spell_id, uint32 char_id) {
std::string spell_global_name;
int spell_global_value;
int global_value;
std::string query = StringFormat("SELECT qglobal, value FROM spell_globals WHERE spellid = %i", spell_id);
auto results = database.QueryDatabase(query);
if (!results.Success()) {
return false; // Query failed, so prevent spell from scribing just in case
}
if (results.RowCount() != 1)
return true; // Spell ID isn't listed in the spells_global table, so it is not restricted from scribing
auto row = results.begin();
spell_global_name = row[0];
spell_global_value = atoi(row[1]);
if (spell_global_name.empty())
return true; // If the entry in the spell_globals table has nothing set for the qglobal name
query = StringFormat("SELECT value FROM quest_globals "
"WHERE charid = %i AND name = '%s'",
char_id, spell_global_name.c_str());
results = database.QueryDatabase(query);
if (!results.Success()) {
LogError(
"Spell ID [{}] query of spell_globals with Name: [{}] Value: [{}] failed",
spell_id,
spell_global_name.c_str(),
spell_global_value
);
return false;
}
if (results.RowCount() != 1) {
LogError(
"Char ID: [{}] does not have the Qglobal Name: [{}] for Spell ID [{}]",
char_id,
spell_global_name.c_str(),
spell_id
);
return false;
}
row = results.begin();
global_value = atoi(row[0]);
	if (global_value >= spell_global_value) {
		return true; // If the qglobal value meets or exceeds the required spell_globals value, allow scribing
	}
// If no matching result found in qglobals, don't scribe this spell
LogError(
"Char ID: [{}] SpellGlobals Name: [{}] Value: [{}] did not match QGlobal Value: [{}] for Spell ID [{}]",
char_id,
spell_global_name.c_str(),
spell_global_value,
global_value,
spell_id
);
return false;
}
bool Client::SpellBucketCheck(uint16 spell_id, uint32 char_id) {
std::string spell_bucket_name;
int spell_bucket_value;
int bucket_value;
std::string query = StringFormat("SELECT `key`, value FROM spell_buckets WHERE spellid = %i", spell_id);
auto results = database.QueryDatabase(query);
if (!results.Success())
return false;
if (results.RowCount() != 1)
return true;
auto row = results.begin();
spell_bucket_name = row[0];
spell_bucket_value = atoi(row[1]);
if (spell_bucket_name.empty())
return true;
query = StringFormat("SELECT value FROM data_buckets WHERE `key` = '%i-%s'", char_id, spell_bucket_name.c_str());
results = database.QueryDatabase(query);
if (!results.Success()) {
LogError(
"Spell bucket [{}] for spell ID [{}] for char ID [{}] failed",
spell_bucket_name.c_str(),
spell_id,
char_id
);
return false;
}
if (results.RowCount() != 1) {
LogError(
"Spell bucket [{}] does not exist for spell ID [{}] for char ID [{}]",
spell_bucket_name.c_str(),
spell_id,
char_id
);
return false;
}
row = results.begin();
bucket_value = atoi(row[0]);
	if (bucket_value >= spell_bucket_value)
		return true; // If the data bucket value meets or exceeds the required spell bucket value, allow scribing
// If no matching result found in spell buckets, don't scribe this spell
LogError("Spell bucket [{}] for spell ID [{}] for char ID [{}] did not match value [{}]", spell_bucket_name.c_str(), spell_id, char_id, spell_bucket_value);
return false;
}
// TODO get rid of this
int16 Mob::GetBuffSlotFromType(uint16 type) {
	int buff_count = GetMaxTotalSlots();
for (int i = 0; i < buff_count; i++) {
if (buffs[i].spellid != SPELL_UNKNOWN) {
for (int j = 0; j < EFFECT_COUNT; j++) {
if (spells[buffs[i].spellid].effectid[j] == type )
return i;
}
}
}
return -1;
}
uint16 Mob::GetSpellIDFromSlot(uint8 slot)
{
if (buffs[slot].spellid != SPELL_UNKNOWN)
return buffs[slot].spellid;
return 0;
}
bool Mob::FindType(uint16 type, bool bOffensive, uint16 threshold) {
int buff_count = GetMaxTotalSlots();
for (int i = 0; i < buff_count; i++) {
if (buffs[i].spellid != SPELL_UNKNOWN) {
for (int j = 0; j < EFFECT_COUNT; j++) {
// adjustments necessary for offensive npc casting behavior
if (bOffensive) {
if (spells[buffs[i].spellid].effectid[j] == type) {
int16 value =
CalcSpellEffectValue_formula(spells[buffs[i].spellid].buffdurationformula,
spells[buffs[i].spellid].base[j],
spells[buffs[i].spellid].max[j],
buffs[i].casterlevel, buffs[i].spellid);
Log(Logs::General, Logs::Normal,
"FindType: type = %d; value = %d; threshold = %d",
type, value, threshold);
if (value < threshold)
return true;
}
} else {
if (spells[buffs[i].spellid].effectid[j] == type )
return true;
}
}
}
}
return false;
}
bool Mob::IsCombatProc(uint16 spell_id) {
if (RuleB(Spells, FocusCombatProcs))
return false;
if(spell_id == SPELL_UNKNOWN)
return(false);
if ((spells[spell_id].cast_time == 0) && (spells[spell_id].recast_time == 0) && (spells[spell_id].recovery_time == 0))
{
for (int i = 0; i < MAX_PROCS; i++){
if (PermaProcs[i].spellID == spell_id || SpellProcs[i].spellID == spell_id
|| RangedProcs[i].spellID == spell_id){
return true;
}
}
}
return false;
}
bool Mob::AddProcToWeapon(uint16 spell_id, bool bPerma, uint16 iChance, uint16 base_spell_id, int level_override) {
if(spell_id == SPELL_UNKNOWN)
return(false);
int i;
if (bPerma) {
for (i = 0; i < MAX_PROCS; i++) {
if (PermaProcs[i].spellID == SPELL_UNKNOWN) {
PermaProcs[i].spellID = spell_id;
PermaProcs[i].chance = iChance;
PermaProcs[i].base_spellID = base_spell_id;
PermaProcs[i].level_override = level_override;
LogSpells("Added permanent proc spell [{}] with chance [{}] to slot [{}]", spell_id, iChance, i);
return true;
}
}
LogSpells("Too many perma procs for [{}]", GetName());
} else {
// If it's a poison proc, replace any existing one if present.
if (base_spell_id == POISON_PROC) {
for (i = 0; i < MAX_PROCS; i++) {
// If we already have a poison proc active replace it and return
if (SpellProcs[i].base_spellID == POISON_PROC) {
SpellProcs[i].spellID = spell_id;
SpellProcs[i].chance = iChance;
SpellProcs[i].level_override = level_override;
LogSpells("Replaced poison-granted proc spell [{}] with chance [{}] to slot [{}]", spell_id, iChance, i);
return true;
}
}
}
// If we get here it either wasn't poison (which can only use 1 slot)
// or it is poison and no poison procs are currently present.
// Find a slot and use it as normal.
for (i = 0; i < MAX_PROCS; i++) {
if (SpellProcs[i].spellID == SPELL_UNKNOWN) {
SpellProcs[i].spellID = spell_id;
SpellProcs[i].chance = iChance;
SpellProcs[i].base_spellID = base_spell_id;
SpellProcs[i].level_override = level_override;
LogSpells("Added [{}]-granted proc spell [{}] with chance [{}] to slot [{}]", (base_spell_id == POISON_PROC) ? "poison" : "spell", spell_id, iChance, i);
return true;
}
}
LogSpells("Too many procs for [{}]", GetName());
}
return false;
}
bool Mob::RemoveProcFromWeapon(uint16 spell_id, bool bAll) {
for (int i = 0; i < MAX_PROCS; i++) {
if (bAll || SpellProcs[i].spellID == spell_id) {
SpellProcs[i].spellID = SPELL_UNKNOWN;
SpellProcs[i].chance = 0;
SpellProcs[i].base_spellID = SPELL_UNKNOWN;
SpellProcs[i].level_override = -1;
LogSpells("Removed proc [{}] from slot [{}]", spell_id, i);
}
}
return true;
}
bool Mob::AddDefensiveProc(uint16 spell_id, uint16 iChance, uint16 base_spell_id)
{
if(spell_id == SPELL_UNKNOWN)
return(false);
int i;
for (i = 0; i < MAX_PROCS; i++) {
if (DefensiveProcs[i].spellID == SPELL_UNKNOWN) {
DefensiveProcs[i].spellID = spell_id;
DefensiveProcs[i].chance = iChance;
DefensiveProcs[i].base_spellID = base_spell_id;
LogSpells("Added spell-granted defensive proc spell [{}] with chance [{}] to slot [{}]", spell_id, iChance, i);
return true;
}
}
return false;
}
bool Mob::RemoveDefensiveProc(uint16 spell_id, bool bAll)
{
for (int i = 0; i < MAX_PROCS; i++) {
if (bAll || DefensiveProcs[i].spellID == spell_id) {
DefensiveProcs[i].spellID = SPELL_UNKNOWN;
DefensiveProcs[i].chance = 0;
DefensiveProcs[i].base_spellID = SPELL_UNKNOWN;
LogSpells("Removed defensive proc [{}] from slot [{}]", spell_id, i);
}
}
return true;
}
bool Mob::AddRangedProc(uint16 spell_id, uint16 iChance, uint16 base_spell_id)
{
if(spell_id == SPELL_UNKNOWN)
return(false);
int i;
for (i = 0; i < MAX_PROCS; i++) {
if (RangedProcs[i].spellID == SPELL_UNKNOWN) {
RangedProcs[i].spellID = spell_id;
RangedProcs[i].chance = iChance;
RangedProcs[i].base_spellID = base_spell_id;
LogSpells("Added spell-granted ranged proc spell [{}] with chance [{}] to slot [{}]", spell_id, iChance, i);
return true;
}
}
return false;
}
bool Mob::RemoveRangedProc(uint16 spell_id, bool bAll)
{
for (int i = 0; i < MAX_PROCS; i++) {
if (bAll || RangedProcs[i].spellID == spell_id) {
RangedProcs[i].spellID = SPELL_UNKNOWN;
RangedProcs[i].chance = 0;
RangedProcs[i].base_spellID = SPELL_UNKNOWN;
LogSpells("Removed ranged proc [{}] from slot [{}]", spell_id, i);
}
}
return true;
}
// this is checked in a few places to decide whether special bard
// behavior should be used.
bool Mob::UseBardSpellLogic(uint16 spell_id, int slot)
{
if(spell_id == SPELL_UNKNOWN)
spell_id = casting_spell_id;
if(slot == -1)
slot = static_cast<int>(casting_spell_slot);
// should we treat this as a bard singing?
return
(
spell_id != 0 &&
spell_id != SPELL_UNKNOWN &&
slot != -1 &&
GetClass() == BARD &&
slot <= EQ::spells::SPELL_GEM_COUNT &&
IsBardSong(spell_id)
);
}
int Mob::GetCasterLevel(uint16 spell_id) {
int level = GetLevel();
level += itembonuses.effective_casting_level + spellbonuses.effective_casting_level + aabonuses.effective_casting_level;
LogSpells("Determined effective casting level [{}]+[{}]+[{}]=[{}]", GetLevel(), spellbonuses.effective_casting_level, itembonuses.effective_casting_level, level);
return std::max(1, level);
}
//this method does NOT tell the client to stop singing the song.
//this is NOT the right way to stop a mob from singing, use InterruptSpell
//you should really know what you're doing before you call this
void Mob::_StopSong()
{
bardsong = 0;
bardsong_target_id = 0;
bardsong_slot = CastingSlot::Gem1;
bardsong_timer.Disable();
}
//This member function sets the buff duration on the client
//however it does not work if sent quickly after an action packet, which is what one might prefer to do
//Thus I use this in the buff process to update the correct duration once after casting
//this allows AAs and focus effects that increase buff duration to work correctly, but could probably
//be used for other things as well
void Client::SendBuffDurationPacket(Buffs_Struct &buff, int slot)
{
EQApplicationPacket* outapp = nullptr;
outapp = new EQApplicationPacket(OP_Buff, sizeof(SpellBuffPacket_Struct));
SpellBuffPacket_Struct* sbf = (SpellBuffPacket_Struct*) outapp->pBuffer;
sbf->entityid = GetID();
sbf->buff.effect_type = 2;
sbf->buff.level = buff.casterlevel > 0 ? buff.casterlevel : GetLevel();
sbf->buff.bard_modifier = buff.instrument_mod;
sbf->buff.spellid = buff.spellid;
sbf->buff.duration = buff.ticsremaining;
if (buff.dot_rune)
sbf->buff.counters = buff.dot_rune;
else if (buff.magic_rune)
sbf->buff.counters = buff.magic_rune;
else if (buff.melee_rune)
sbf->buff.counters = buff.melee_rune;
else if (buff.counters)
sbf->buff.counters = buff.counters;
sbf->buff.player_id = buff.casterid;
sbf->buff.num_hits = buff.numhits;
sbf->buff.y = buff.caston_y;
sbf->buff.x = buff.caston_x;
sbf->buff.z = buff.caston_z;
sbf->slotid = slot;
sbf->bufffade = 0;
FastQueuePacket(&outapp);
}
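// Sketch of the intended call pattern (names here are illustrative, not the
// actual call site): after focus/AA effects have adjusted a buff's duration
// during the buff process, resend the corrected tick count once:
//   buffs[slot].ticsremaining = adjusted_ticks;
//   if (IsClient())
//       CastToClient()->SendBuffDurationPacket(buffs[slot], slot);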
void Client::SendBuffNumHitPacket(Buffs_Struct &buff, int slot)
{
// UF+ use this packet
if (ClientVersion() < EQ::versions::ClientVersion::UF)
return;
EQApplicationPacket *outapp = nullptr;
outapp = new EQApplicationPacket(OP_BuffCreate, sizeof(BuffIcon_Struct) + sizeof(BuffIconEntry_Struct));
BuffIcon_Struct *bi = (BuffIcon_Struct *)outapp->pBuffer;
bi->entity_id = GetID();
bi->count = 1;
bi->all_buffs = 0;
bi->tic_timer = tic_timer.GetRemainingTime();
bi->entries[0].buff_slot = slot;
bi->entries[0].spell_id = buff.spellid;
bi->entries[0].tics_remaining = buff.ticsremaining;
bi->entries[0].num_hits = buff.numhits;
strn0cpy(bi->entries[0].caster, buff.caster_name, 64);
bi->name_lengths = strlen(bi->entries[0].caster);
FastQueuePacket(&outapp);
}
void Mob::SendPetBuffsToClient()
{
// Don't really need this check, as it should be checked before this method is called, but it doesn't hurt
// too much to check again.
if(!(GetOwner() && GetOwner()->IsClient()))
return;
int PetBuffCount = 0;
auto outapp = new EQApplicationPacket(OP_PetBuffWindow, sizeof(PetBuff_Struct));
PetBuff_Struct* pbs=(PetBuff_Struct*)outapp->pBuffer;
memset(outapp->pBuffer,0,outapp->size);
pbs->petid=GetID();
int MaxSlots = GetMaxTotalSlots();
if(MaxSlots > PET_BUFF_COUNT)
MaxSlots = PET_BUFF_COUNT;
for(int buffslot = 0; buffslot < MaxSlots; buffslot++)
{
if(buffs[buffslot].spellid != SPELL_UNKNOWN) {
pbs->spellid[buffslot] = buffs[buffslot].spellid;
pbs->ticsremaining[buffslot] = buffs[buffslot].ticsremaining;
PetBuffCount++;
}
}
pbs->buffcount=PetBuffCount;
GetOwner()->CastToClient()->QueuePacket(outapp);
safe_delete(outapp);
}
void Mob::SendBuffsToClient(Client *c)
{
if(!c)
return;
if (c->ClientVersionBit() & EQ::versions::maskSoDAndLater)
{
EQApplicationPacket *outapp = MakeBuffsPacket();
c->FastQueuePacket(&outapp);
}
}
EQApplicationPacket *Mob::MakeBuffsPacket(bool for_target)
{
uint32 count = 0;
// for self we want all buffs, for target, we want to skip song window buffs
// since NPCs and pets don't have a song window, we still see it for them :P
int buff_count = for_target ? GetMaxBuffSlots() : GetMaxTotalSlots();
for(int i = 0; i < buff_count; ++i)
{
if(buffs[i].spellid != SPELL_UNKNOWN)
{
++count;
}
}
EQApplicationPacket* outapp = nullptr;
//Create it for a targeting window, else create it for a create buff packet.
if(for_target)
{
outapp = new EQApplicationPacket(OP_TargetBuffs, sizeof(BuffIcon_Struct) + sizeof(BuffIconEntry_Struct) * count);
}
else
{
outapp = new EQApplicationPacket(OP_BuffCreate, sizeof(BuffIcon_Struct) + sizeof(BuffIconEntry_Struct) * count);
}
BuffIcon_Struct *buff = (BuffIcon_Struct*)outapp->pBuffer;
buff->entity_id = GetID();
buff->count = count;
buff->all_buffs = 1;
buff->tic_timer = tic_timer.GetRemainingTime();
// there are more types, the client doesn't seem to really care though. The others are also currently hard to fill in here ...
// (see comment in common/eq_packet_structs.h)
if (for_target)
buff->type = IsClient() ? 5 : 7;
else
buff->type = 0;
buff->name_lengths = 0; // hacky shit
uint32 index = 0;
for(int i = 0; i < buff_count; ++i)
{
if(buffs[i].spellid != SPELL_UNKNOWN)
{
buff->entries[index].buff_slot = i;
buff->entries[index].spell_id = buffs[i].spellid;
buff->entries[index].tics_remaining = buffs[i].ticsremaining;
buff->entries[index].num_hits = buffs[i].numhits;
strn0cpy(buff->entries[index].caster, buffs[i].caster_name, 64);
buff->name_lengths += strlen(buff->entries[index].caster);
++index;
}
}
return outapp;
}
void Mob::BuffModifyDurationBySpellID(uint16 spell_id, int32 newDuration)
{
int buff_count = GetMaxTotalSlots();
for(int i = 0; i < buff_count; ++i)
{
if (buffs[i].spellid == spell_id)
{
buffs[i].ticsremaining = newDuration;
if(IsClient())
{
CastToClient()->SendBuffDurationPacket(buffs[i], i);
}
}
}
}
int Client::GetCurrentBuffSlots() const
{
int numbuffs = 15;
// client does check spells and items
numbuffs += aabonuses.BuffSlotIncrease + spellbonuses.BuffSlotIncrease + itembonuses.BuffSlotIncrease;
if (GetLevel() > 70)
numbuffs++;
if (GetLevel() > 74)
numbuffs++;
return EQ::ClampUpper(numbuffs, EQ::spells::StaticLookup(m_ClientVersion)->LongBuffs);
}
int Client::GetCurrentSongSlots() const
{
return EQ::spells::StaticLookup(m_ClientVersion)->ShortBuffs; // AAs don't affect this
}
void Client::InitializeBuffSlots()
{
int max_slots = GetMaxTotalSlots();
buffs = new Buffs_Struct[max_slots];
for(int x = 0; x < max_slots; ++x)
{
buffs[x].spellid = SPELL_UNKNOWN;
buffs[x].UpdateClient = false;
}
current_buff_count = 0;
}
void Client::UninitializeBuffSlots()
{
safe_delete_array(buffs);
}
void NPC::InitializeBuffSlots()
{
int max_slots = GetMaxTotalSlots();
buffs = new Buffs_Struct[max_slots];
for (int x = 0; x < max_slots; ++x) {
buffs[x].spellid = SPELL_UNKNOWN;
buffs[x].UpdateClient = false;
}
current_buff_count = 0;
}
void NPC::UninitializeBuffSlots()
{
safe_delete_array(buffs);
}
void Client::SendSpellAnim(uint16 targetid, uint16 spell_id)
{
if (!targetid || !IsValidSpell(spell_id))
return;
EQApplicationPacket app(OP_Action, sizeof(Action_Struct));
Action_Struct* a = (Action_Struct*)app.pBuffer;
a->target = targetid;
a->source = this->GetID();
a->type = 231;
a->spell = spell_id;
a->hit_heading = GetHeading();
app.priority = 1;
entity_list.QueueCloseClients(this, &app, false, RuleI(Range, SpellParticles));
}
void Mob::CalcDestFromHeading(float heading, float distance, float MaxZDiff, float StartX, float StartY, float &dX, float &dY, float &dZ)
{
if (!distance) { return; }
if (!MaxZDiff) { MaxZDiff = 5; }
float ReverseHeading = 512 - heading;
float ConvertAngle = ReverseHeading * 360.0f / 512.0f;
if (ConvertAngle <= 270)
ConvertAngle = ConvertAngle + 90;
else
ConvertAngle = ConvertAngle - 270;
float Radian = ConvertAngle * (3.1415927f / 180.0f);
float CircleX = distance * std::cos(Radian);
float CircleY = distance * std::sin(Radian);
dX = CircleX + StartX;
dY = CircleY + StartY;
dZ = FindGroundZ(dX, dY, MaxZDiff);
}
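// Worked example of the conversion above (pure arithmetic): EQ headings run
// 0-512 for a full circle. For heading = 0: ReverseHeading = 512,
// ConvertAngle = 360 -> 90 degrees, so the destination lies straight along
// +Y from the start point. For heading = 128: ReverseHeading = 384,
// ConvertAngle = 270 -> 360 degrees, i.e. straight along +X.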
void Mob::BeamDirectional(uint16 spell_id, int16 resist_adjust)
{
int maxtarget_count = 0;
bool beneficial_targets = false;
if (IsBeneficialSpell(spell_id) && IsClient())
beneficial_targets = true;
std::list<Mob *> targets_in_range;
entity_list.GetTargetsForConeArea(this, spells[spell_id].min_range, spells[spell_id].range,
spells[spell_id].range / 2, spells[spell_id].pcnpc_only_flag, targets_in_range);
auto iter = targets_in_range.begin();
float dX = 0;
float dY = 0;
float dZ = 0;
CalcDestFromHeading(GetHeading(), spells[spell_id].range, 5, GetX(), GetY(), dX, dY, dZ);
dZ = GetZ();
// FIND SLOPE: Put it into the form y = mx + b
float m = (dY - GetY()) / (dX - GetX());
float b = (GetY() * dX - dY * GetX()) / (dX - GetX());
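// Note: the slope form breaks down when dX equals GetX() (a beam parallel
// to the Y axis makes the divisor zero); the distance test below assumes a
// finite slope.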
while (iter != targets_in_range.end()) {
if (!(*iter) || (beneficial_targets && ((*iter)->IsNPC() && !(*iter)->IsPetOwnerClient())) ||
(*iter)->BehindMob(this, (*iter)->GetX(), (*iter)->GetY())) {
++iter;
continue;
}
if (IsNPC() && (*iter)->IsNPC()) {
auto fac = (*iter)->GetReverseFactionCon(this);
if (beneficial_targets) {
// only affect mobs we would assist.
if (!(fac <= FACTION_AMIABLE)) {
++iter;
continue;
}
} else {
// affect mobs that are on our hate list, or which have bad faction with us
if (!(CheckAggro(*iter) || fac == FACTION_THREATENLY || fac == FACTION_SCOWLS)) {
++iter;
continue;
}
}
}
// Shortest distance from the beam's line to the target point: d = |y0 - m*x0 - b| / sqrt(m^2 + 1)
float d = std::abs((*iter)->GetY() - m * (*iter)->GetX() - b) / sqrt(m * m + 1);
if (d <= spells[spell_id].aoerange) {
if (CheckLosFN((*iter)) || spells[spell_id].npc_no_los) {
(*iter)->CalcSpellPowerDistanceMod(spell_id, 0, this);
SpellOnTarget(spell_id, (*iter), 0, true, resist_adjust);
maxtarget_count++;
}
// not sure if we need this check, but we probably do; need to verify whether it should be limited by default or not
if (spells[spell_id].aemaxtargets && maxtarget_count >= spells[spell_id].aemaxtargets)
return;
}
++iter;
}
}
void Mob::ConeDirectional(uint16 spell_id, int16 resist_adjust)
{
int maxtarget_count = 0;
bool beneficial_targets = false;
if (IsBeneficialSpell(spell_id) && IsClient())
beneficial_targets = true;
float heading = GetHeading() * 360.0f / 512.0f; // convert to degrees
float angle_start = spells[spell_id].directional_start + heading;
float angle_end = spells[spell_id].directional_end + heading;
while (angle_start > 360.0f)
angle_start -= 360.0f;
while (angle_end > 360.0f)
angle_end -= 360.0f;
std::list<Mob *> targets_in_range;
entity_list.GetTargetsForConeArea(this, spells[spell_id].min_range, spells[spell_id].aoerange,
spells[spell_id].aoerange / 2, spells[spell_id].pcnpc_only_flag, targets_in_range);
auto iter = targets_in_range.begin();
while (iter != targets_in_range.end()) {
if (!(*iter) || (beneficial_targets && ((*iter)->IsNPC() && !(*iter)->IsPetOwnerClient()))) {
++iter;
continue;
}
float heading_to_target =
(CalculateHeadingToTarget((*iter)->GetX(), (*iter)->GetY()) * 360.0f / 512.0f);
while (heading_to_target < 0.0f)
heading_to_target += 360.0f;
while (heading_to_target > 360.0f)
heading_to_target -= 360.0f;
if (IsNPC() && (*iter)->IsNPC()) {
auto fac = (*iter)->GetReverseFactionCon(this);
if (beneficial_targets) {
// only affect mobs we would assist.
if (!(fac <= FACTION_AMIABLE)) {
++iter;
continue;
}
} else {
// affect mobs that are on our hate list, or which have bad faction with us
if (!(CheckAggro(*iter) || fac == FACTION_THREATENLY || fac == FACTION_SCOWLS)) {
++iter;
continue;
}
}
}
if (angle_start > angle_end) {
if ((heading_to_target >= angle_start && heading_to_target <= 360.0f) ||
(heading_to_target >= 0.0f && heading_to_target <= angle_end)) {
if (CheckLosFN((*iter)) || spells[spell_id].npc_no_los) {
(*iter)->CalcSpellPowerDistanceMod(spell_id, 0, this);
SpellOnTarget(spell_id, (*iter), 0, true, resist_adjust);
maxtarget_count++;
}
}
} else {
if (heading_to_target >= angle_start && heading_to_target <= angle_end) {
if (CheckLosFN((*iter)) || spells[spell_id].npc_no_los) {
(*iter)->CalcSpellPowerDistanceMod(spell_id, 0, this);
SpellOnTarget(spell_id, (*iter), 0, true, resist_adjust);
maxtarget_count++;
}
}
}
// my SHM breath could hit all 5 dummies I could summon in arena
if (spells[spell_id].aemaxtargets && maxtarget_count >= spells[spell_id].aemaxtargets)
return;
++iter;
}
}
// duration in seconds
void Client::SetLinkedSpellReuseTimer(uint32 timer_id, uint32 duration)
{
if (timer_id > 19)
return;
LogSpells("Setting Linked Spell Reuse [{}] for [{}]", timer_id, duration);
GetPTimers().Start(pTimerLinkedSpellReuseStart + timer_id, duration);
auto outapp = new EQApplicationPacket(OP_LinkedReuse, sizeof(LinkedSpellReuseTimer_Struct));
auto lr = (LinkedSpellReuseTimer_Struct *)outapp->pBuffer;
lr->timer_id = timer_id;
lr->start_time = Timer::GetCurrentTime() / 1000;
lr->end_time = lr->start_time + duration;
FastQueuePacket(&outapp);
}
bool Client::IsLinkedSpellReuseTimerReady(uint32 timer_id)
{
if (timer_id > 19)
return true;
return GetPTimers().Expired(&database, pTimerLinkedSpellReuseStart + timer_id, false);
}
| 1 | 10,935 | We don't need to cast to client here. (well, from spell gem not AA etc) We should also make sure the charm is a casted spell before calling SendSpellBarEnable. | EQEmu-Server | cpp |
@@ -63,6 +63,7 @@ def define_violation(dbengine):
full_name = Column(String(1024))
inventory_data = Column(Text(16777215))
inventory_index_id = Column(String(256))
+ invocation_id = Column(DateTime())
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
rule_name = Column(String(256)) | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database access objects for Forseti Scanner. """
from collections import defaultdict
from datetime import datetime
import hashlib
import json
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import String
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import inspect
from sqlalchemy.orm import sessionmaker
from google.cloud.forseti.common.data_access import violation_map as vm
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.services import db
LOGGER = logger.get_logger(__name__)
# pylint: disable=no-member
def define_violation(dbengine):
"""Defines table class for violations.
A violation table will be created on a per-model basis.
Args:
dbengine (engine): sqlalchemy database engine
Returns:
ViolationAccess: facade for accessing violations.
"""
# TODO: Determine if dbengine is really needed as a method arg here.
base = declarative_base()
violations_tablename = 'violations'
class Violation(base):
"""Row entry for a violation."""
__tablename__ = violations_tablename
id = Column(Integer, primary_key=True)
created_at = Column(DateTime())
full_name = Column(String(1024))
inventory_data = Column(Text(16777215))
inventory_index_id = Column(String(256))
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
rule_name = Column(String(256))
rule_index = Column(Integer, default=0)
violation_data = Column(Text)
violation_type = Column(String(256), nullable=False)
violation_hash = Column(String(256))
def __repr__(self):
"""String representation.
Returns:
str: string representation of the Violation row entry.
"""
string = ('<Violation(violation_type={}, resource_type={} '
'rule_name={})>')
return string.format(
self.violation_type, self.resource_type, self.rule_name)
class ViolationAccess(object):
"""Facade for violations, implement APIs against violations table."""
TBL_VIOLATIONS = Violation
def __init__(self, dbengine):
"""Constructor for the Violation Access.
Args:
dbengine (engine): sqlalchemy database engine
"""
self.engine = dbengine
self.violationmaker = self._create_violation_session()
def _create_violation_session(self):
"""Create a session to read from the models table.
Returns:
ScopedSessionmaker: A scoped session maker that will create
a session that is automatically released.
"""
return db.ScopedSessionMaker(
sessionmaker(
bind=self.engine,
expire_on_commit=False),
auto_commit=True)
def create(self, violations, inventory_index_id):
"""Save violations to the db table.
Args:
violations (list): A list of violations.
inventory_index_id (str): Id of the inventory index.
"""
with self.violationmaker() as session:
created_at = datetime.utcnow()
for violation in violations:
violation_hash = _create_violation_hash(
violation.get('full_name', ''),
violation.get('inventory_data', ''),
violation.get('violation_data', ''),
)
violation = self.TBL_VIOLATIONS(
inventory_index_id=inventory_index_id,
resource_id=violation.get('resource_id'),
resource_type=violation.get('resource_type'),
full_name=violation.get('full_name'),
rule_name=violation.get('rule_name'),
rule_index=violation.get('rule_index'),
violation_type=violation.get('violation_type'),
violation_data=json.dumps(
violation.get('violation_data')),
inventory_data=violation.get('inventory_data'),
violation_hash=violation_hash,
created_at=created_at
)
session.add(violation)
def list(self, inventory_index_id=None):
"""List all violations from the db table.
Args:
inventory_index_id (str): Id of the inventory index.
Returns:
list: List of Violation row entry objects.
"""
with self.violationmaker() as session:
if inventory_index_id:
return (
session.query(self.TBL_VIOLATIONS)
.filter(
self.TBL_VIOLATIONS.inventory_index_id ==
inventory_index_id)
.all())
return (
session.query(self.TBL_VIOLATIONS)
.all())
base.metadata.create_all(dbengine)
return ViolationAccess
# pylint: disable=invalid-name
def convert_sqlalchemy_object_to_dict(sqlalchemy_obj):
"""Convert a sqlalchemy row/record object to a dictionary.
Args:
sqlalchemy_obj (sqlalchemy_object): A sqlalchemy row/record object
Returns:
dict: A dict of sqlalchemy object's attributes.
"""
return {c.key: getattr(sqlalchemy_obj, c.key)
for c in inspect(sqlalchemy_obj).mapper.column_attrs}
def map_by_resource(violation_rows):
"""Create a map of violation types to violations of that resource.
Args:
violation_rows (list): A list of dict of violation data.
Returns:
dict: A dict of resource types mapped to the list of corresponding
violation data, i.e. { resource => [violation_data...] }.
"""
# The defaultdict makes it easy to add a value to a key without having
# to check if the key exists.
v_by_type = defaultdict(list)
for v_data in violation_rows:
try:
v_data['violation_data'] = json.loads(v_data['violation_data'])
v_data['inventory_data'] = json.loads(v_data['inventory_data'])
except ValueError:
LOGGER.warning('Invalid violation data, unable to parse json for %s',
v_data['violation_data'])
v_resource = vm.VIOLATION_RESOURCES.get(v_data['violation_type'])
if v_resource:
v_by_type[v_resource].append(v_data)
return dict(v_by_type)
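# Illustrative shape (hypothetical data): if vm.VIOLATION_RESOURCES maps a
# row's violation_type to the resource 'policy', the result looks like
# {'policy': [{'violation_type': ..., 'violation_data': {...}, ...}]}.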
def _create_violation_hash(violation_full_name, inventory_data, violation_data):
"""Create a hash of violation data.
Args:
violation_full_name (str): The full name of the violation.
inventory_data (str): The inventory data.
violation_data (dict): A violation.
Returns:
str: The resulting hex digest or '' if we can't successfully create
a hash.
"""
# TODO: Intelligently choose from hashlib.algorithms_guaranteed if our
# desired one is not available.
algorithm = 'sha512'
try:
violation_hash = hashlib.new(algorithm)
except ValueError as e:
LOGGER.error('Cannot create hash for a violation with algorithm: '
'%s\n%s', algorithm, e)
return ''
try:
violation_hash.update(
violation_full_name +
inventory_data +
json.dumps(violation_data, sort_keys=True)
)
except TypeError as e:
LOGGER.error('Cannot create hash for a violation: %s\n%s',
violation_full_name, e)
return ''
return violation_hash.hexdigest()
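# Note on determinism: the hash input is full_name + inventory_data +
# json.dumps(violation_data, sort_keys=True), so two violations with the
# same content always produce the same digest regardless of dict ordering.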
| 1 | 29,206 | This is not an `id`. This should either be `invocation_time` or `invocated_at` to be consistent with what we are using elsewhere, and also to better rerflect the column's DateTime type. Also, within the context of this table, there is no idea of what `invocation` is. I know that `scanner` may not be future-proof, but we need to find something more descriptive. Perhaps one possibility is to call this `audit_time`. | forseti-security-forseti-security | py |
@@ -412,6 +412,18 @@ func (tx Transaction) TxAmount() basics.MicroAlgos {
}
}
+// GetReceiverAddress returns the address of the receiver. If the transaction has no receiver, it returns the empty address.
+func (tx Transaction) GetReceiverAddress() basics.Address {
+ switch tx.Type {
+ case protocol.PaymentTx:
+ return tx.PaymentTxnFields.Receiver
+ case protocol.AssetTransferTx:
+ return tx.AssetTransferTxnFields.AssetReceiver
+ default:
+ return basics.Address{}
+ }
+}
+
// EstimateEncodedSize returns the estimated encoded size of the transaction including the signature.
// This function is to be used for calculating the fee
func (tx Transaction) EstimateEncodedSize() int { | 1 | // Copyright (C) 2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package transactions
import (
"fmt"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
)
// Txid is a hash used to uniquely identify individual transactions
type Txid crypto.Digest
// String converts txid to a pretty-printable string
func (txid Txid) String() string {
return fmt.Sprintf("%v", crypto.Digest(txid))
}
// UnmarshalText initializes the Address from an array of bytes.
func (txid *Txid) UnmarshalText(text []byte) error {
d, err := crypto.DigestFromString(string(text))
*txid = Txid(d)
return err
}
// SpecialAddresses holds addresses with nonstandard properties.
type SpecialAddresses struct {
FeeSink basics.Address
RewardsPool basics.Address
}
// Balances allow to move MicroAlgos from one address to another and to update balance records, or to access and modify individual balance records
// After a call to Put (or Move), future calls to Get or Move will reflect the updated balance record(s)
type Balances interface {
// Get looks up the balance record for an address
// If the account is known to be empty, then err should be nil and the returned balance record should have the given address and empty AccountData
// withPendingRewards specifies whether pending rewards should be applied.
// A non-nil error means the lookup is impossible (e.g., if the database doesn't have necessary state anymore)
Get(addr basics.Address, withPendingRewards bool) (basics.BalanceRecord, error)
// GetAssetCreator gets the address of the account whose balance record
// contains the asset params
GetAssetCreator(aidx basics.AssetIndex) (basics.Address, error)
Put(basics.BalanceRecord) error
// Move MicroAlgos from one account to another, doing all necessary overflow checking (convenience method)
// TODO: Does this need to be part of the balances interface, or can it just be implemented here as a function that calls Put and Get?
Move(src, dst basics.Address, amount basics.MicroAlgos, srcRewards *basics.MicroAlgos, dstRewards *basics.MicroAlgos) error
// Balances correspond to a Round, which mean that they also correspond
// to a ConsensusParams. This returns those parameters.
ConsensusParams() config.ConsensusParams
}
// Header captures the fields common to every transaction type.
type Header struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Sender basics.Address `codec:"snd"`
Fee basics.MicroAlgos `codec:"fee"`
FirstValid basics.Round `codec:"fv"`
LastValid basics.Round `codec:"lv"`
Note []byte `codec:"note"` // Uniqueness or app-level data about txn
GenesisID string `codec:"gen"`
GenesisHash crypto.Digest `codec:"gh"`
// Group specifies that this transaction is part of a
// transaction group (and, if so, specifies the hash
// of a TxGroup).
Group crypto.Digest `codec:"grp"`
// Lease enforces mutual exclusion of transactions. If this field is
// nonzero, then once the transaction is confirmed, it acquires the
// lease identified by the (Sender, Lease) pair of the transaction until
// the LastValid round passes. While this transaction possesses the
// lease, no other transaction specifying this lease can be confirmed.
Lease [32]byte `codec:"lx"`
}
// Transaction describes a transaction that can appear in a block.
type Transaction struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
// Type of transaction
Type protocol.TxType `codec:"type"`
// Common fields for all types of transactions
Header
// Fields for different types of transactions
KeyregTxnFields
PaymentTxnFields
AssetConfigTxnFields
AssetTransferTxnFields
AssetFreezeTxnFields
// The transaction's Txid is computed when we decode,
// and cached here, to avoid needlessly recomputing it.
cachedTxid Txid
// The valid flag indicates if this transaction was
// correctly decoded.
valid bool
}
// ApplyData contains information about the transaction's execution.
type ApplyData struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
// Closing amount for transaction.
ClosingAmount basics.MicroAlgos `codec:"ca"`
// Rewards applied to the Sender, Receiver, and CloseRemainderTo accounts.
SenderRewards basics.MicroAlgos `codec:"rs"`
ReceiverRewards basics.MicroAlgos `codec:"rr"`
CloseRewards basics.MicroAlgos `codec:"rc"`
}
// TxGroup describes a group of transactions that must appear
// together in a specific order in a block.
type TxGroup struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
// TxGroupHashes specifies a list of hashes of transactions that must appear
// together, sequentially, in a block in order for the group to be
// valid. Each hash in the list is a hash of a transaction with
// the `Group` field omitted.
TxGroupHashes []crypto.Digest `codec:"txlist"`
}
// ToBeHashed implements the crypto.Hashable interface.
func (tg TxGroup) ToBeHashed() (protocol.HashID, []byte) {
return protocol.TxGroup, protocol.Encode(tg)
}
// ToBeHashed implements the crypto.Hashable interface.
func (tx Transaction) ToBeHashed() (protocol.HashID, []byte) {
return protocol.Transaction, protocol.Encode(tx)
}
func (tx *Transaction) computeID() Txid {
return Txid(crypto.HashObj(tx))
}
// InitCaches initializes caches inside of Transaction.
func (tx *Transaction) InitCaches() {
if !tx.valid {
tx.cachedTxid = tx.computeID()
tx.valid = true
}
}
// ResetCaches clears caches inside of Transaction, if the Transaction was modified.
func (tx *Transaction) ResetCaches() {
tx.valid = false
}
// ID returns the Txid (i.e., hash) of the transaction.
// For efficiency this is precomputed when the Transaction is created.
func (tx Transaction) ID() Txid {
if tx.valid {
return tx.cachedTxid
}
return tx.computeID()
}
// Sign signs a transaction using a given Account's secrets.
func (tx Transaction) Sign(secrets *crypto.SignatureSecrets) SignedTxn {
sig := secrets.Sign(tx)
s := SignedTxn{
Txn: tx,
Sig: sig,
}
s.InitCaches()
return s
}
// Src returns the address that posted the transaction.
// This is the account that pays the associated Fee.
func (tx Header) Src() basics.Address {
return tx.Sender
}
// TxFee returns the fee associated with this transaction.
func (tx Header) TxFee() basics.MicroAlgos {
return tx.Fee
}
// Alive checks to see if the transaction is still alive (can be applied) at the specified Round.
func (tx Header) Alive(tc TxnContext) error {
// Check round validity
round := tc.Round()
if round < tx.FirstValid || round > tx.LastValid {
return TxnDeadError{
Round: round,
FirstValid: tx.FirstValid,
LastValid: tx.LastValid,
}
}
// Check genesis ID
proto := tc.ConsensusProtocol()
genesisID := tc.GenesisID()
if tx.GenesisID != "" && tx.GenesisID != genesisID {
return fmt.Errorf("tx.GenesisID <%s> does not match expected <%s>",
tx.GenesisID, genesisID)
}
// Check genesis hash
if proto.SupportGenesisHash {
genesisHash := tc.GenesisHash()
if tx.GenesisHash != (crypto.Digest{}) && tx.GenesisHash != genesisHash {
return fmt.Errorf("tx.GenesisHash <%s> does not match expected <%s>",
tx.GenesisHash, genesisHash)
}
if proto.RequireGenesisHash && tx.GenesisHash == (crypto.Digest{}) {
return fmt.Errorf("required tx.GenesisHash is missing")
}
} else {
if tx.GenesisHash != (crypto.Digest{}) {
return fmt.Errorf("tx.GenesisHash <%s> not allowed", tx.GenesisHash)
}
}
return nil
}
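// Illustrative example (hypothetical values): a transaction with
// FirstValid=100 and LastValid=200 is Alive only for rounds 100-200, and it
// must also carry a matching (or empty) GenesisID, plus a matching
// GenesisHash on networks where SupportGenesisHash is set.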
// MatchAddress checks if the transaction touches a given address.
func (tx Transaction) MatchAddress(addr basics.Address, spec SpecialAddresses, proto config.ConsensusParams) bool {
for _, candidate := range tx.RelevantAddrs(spec, proto) {
if addr == candidate {
return true
}
}
return false
}
// WellFormed checks that the transaction looks reasonable on its own (but not necessarily valid against the actual ledger). It does not check signatures.
func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusParams) error {
switch tx.Type {
case protocol.PaymentTx:
// in case that the fee sink is spending, check that this spend is to a valid address
err := tx.checkSpender(tx.Header, spec, proto)
if err != nil {
return err
}
case protocol.KeyRegistrationTx:
// check that, if this tx is marking an account nonparticipating,
// it supplies no key (as though it were trying to go offline)
if tx.KeyregTxnFields.Nonparticipation {
if !proto.SupportBecomeNonParticipatingTransactions {
// if the transaction has the Nonparticipation flag high, but the protocol does not support
// that type of transaction, it is invalid.
return fmt.Errorf("transaction tries to mark an account as nonparticipating, but that transaction is not supported")
}
suppliesNullKeys := tx.KeyregTxnFields.VotePK == crypto.OneTimeSignatureVerifier{} || tx.KeyregTxnFields.SelectionPK == crypto.VRFVerifier{}
if !suppliesNullKeys {
return fmt.Errorf("transaction tries to register keys to go online, but nonparticipatory flag is set")
}
}
case protocol.AssetConfigTx:
if !proto.Asset {
return fmt.Errorf("asset transaction not supported")
}
case protocol.AssetTransferTx:
if !proto.Asset {
return fmt.Errorf("asset transaction not supported")
}
case protocol.AssetFreezeTx:
if !proto.Asset {
return fmt.Errorf("asset transaction not supported")
}
default:
return fmt.Errorf("unknown tx type %v", tx.Type)
}
nonZeroFields := make(map[protocol.TxType]bool)
if tx.PaymentTxnFields != (PaymentTxnFields{}) {
nonZeroFields[protocol.PaymentTx] = true
}
if tx.KeyregTxnFields != (KeyregTxnFields{}) {
nonZeroFields[protocol.KeyRegistrationTx] = true
}
if tx.AssetConfigTxnFields != (AssetConfigTxnFields{}) {
nonZeroFields[protocol.AssetConfigTx] = true
}
if tx.AssetTransferTxnFields != (AssetTransferTxnFields{}) {
nonZeroFields[protocol.AssetTransferTx] = true
}
if tx.AssetFreezeTxnFields != (AssetFreezeTxnFields{}) {
nonZeroFields[protocol.AssetFreezeTx] = true
}
for t, nonZero := range nonZeroFields {
if nonZero && t != tx.Type {
return fmt.Errorf("transaction of type %v has non-zero fields for type %v", tx.Type, t)
}
}
if tx.Fee.LessThan(basics.MicroAlgos{Raw: proto.MinTxnFee}) {
return makeMinFeeErrorf("transaction had fee %v, which is less than the minimum %v", tx.Fee, proto.MinTxnFee)
}
if tx.LastValid < tx.FirstValid {
return fmt.Errorf("transaction invalid range (%v--%v)", tx.FirstValid, tx.LastValid)
}
if tx.LastValid-tx.FirstValid > basics.Round(proto.MaxTxnLife) {
return fmt.Errorf("transaction window size excessive (%v--%v)", tx.FirstValid, tx.LastValid)
}
if len(tx.Note) > proto.MaxTxnNoteBytes {
return fmt.Errorf("transaction note too big: %d > %d", len(tx.Note), proto.MaxTxnNoteBytes)
}
if len(tx.AssetConfigTxnFields.AssetParams.AssetName) > proto.MaxAssetNameBytes {
return fmt.Errorf("transaction asset name too big: %d > %d", len(tx.AssetConfigTxnFields.AssetParams.AssetName), proto.MaxAssetNameBytes)
}
if len(tx.AssetConfigTxnFields.AssetParams.UnitName) > proto.MaxAssetUnitNameBytes {
return fmt.Errorf("transaction asset unit name too big: %d > %d", len(tx.AssetConfigTxnFields.AssetParams.UnitName), proto.MaxAssetUnitNameBytes)
}
if len(tx.AssetConfigTxnFields.AssetParams.URL) > proto.MaxAssetURLBytes {
return fmt.Errorf("transaction asset url too big: %d > %d", len(tx.AssetConfigTxnFields.AssetParams.URL), proto.MaxAssetURLBytes)
}
if tx.AssetConfigTxnFields.AssetParams.Decimals > proto.MaxAssetDecimals {
return fmt.Errorf("transaction asset decimals is too high (max is %d)", proto.MaxAssetDecimals)
}
if tx.Sender == spec.RewardsPool {
// this check is just to be safe, but reaching here seems impossible, since it requires computing a preimage of rwpool
return fmt.Errorf("transaction from incentive pool is invalid")
}
if tx.Sender == (basics.Address{}) {
return fmt.Errorf("transaction cannot have zero sender")
}
if !proto.SupportTransactionLeases && (tx.Lease != [32]byte{}) {
return fmt.Errorf("transaction tried to acquire lease %v but protocol does not support transaction leases", tx.Lease)
}
if !proto.SupportTxGroups && (tx.Group != crypto.Digest{}) {
return fmt.Errorf("transaction has group but groups not yet enabled")
}
return nil
}
// Aux returns the note associated with this transaction
func (tx Header) Aux() []byte {
return tx.Note
}
// First returns the first round this transaction is valid
func (tx Header) First() basics.Round {
return tx.FirstValid
}
// Last returns the last round this transaction is valid
func (tx Header) Last() basics.Round {
return tx.LastValid
}
// RelevantAddrs returns the addresses whose balance records this transaction will need to access.
// The header's default is to return just the sender and the fee sink.
func (tx Transaction) RelevantAddrs(spec SpecialAddresses, proto config.ConsensusParams) []basics.Address {
addrs := []basics.Address{tx.Sender, spec.FeeSink}
switch tx.Type {
case protocol.PaymentTx:
addrs = append(addrs, tx.PaymentTxnFields.Receiver)
if tx.PaymentTxnFields.CloseRemainderTo != (basics.Address{}) {
addrs = append(addrs, tx.PaymentTxnFields.CloseRemainderTo)
}
case protocol.AssetTransferTx:
addrs = append(addrs, tx.AssetTransferTxnFields.AssetReceiver)
if tx.AssetTransferTxnFields.AssetCloseTo != (basics.Address{}) {
addrs = append(addrs, tx.AssetTransferTxnFields.AssetCloseTo)
}
}
return addrs
}
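// Illustrative example (hypothetical addresses): for a payment from A to B
// that closes the remainder to C, RelevantAddrs returns
// [A, FeeSink, B, C]; for a keyreg transaction it returns just
// [Sender, FeeSink].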
// TxAmount returns the amount paid to the recipient in this payment
func (tx Transaction) TxAmount() basics.MicroAlgos {
switch tx.Type {
case protocol.PaymentTx:
return tx.PaymentTxnFields.Amount
default:
return basics.MicroAlgos{Raw: 0}
}
}
// EstimateEncodedSize returns the estimated encoded size of the transaction including the signature.
// This function is to be used for calculating the fee
func (tx Transaction) EstimateEncodedSize() int {
var seed crypto.Seed
crypto.RandBytes(seed[:])
keys := crypto.GenerateSignatureSecrets(seed)
stx := tx.Sign(keys)
return stx.GetEncodedLength()
}
// Apply changes the balances according to this transaction.
func (tx Transaction) Apply(balances Balances, spec SpecialAddresses, ctr uint64) (ad ApplyData, err error) {
params := balances.ConsensusParams()
// move fee to pool
err = balances.Move(tx.Sender, spec.FeeSink, tx.Fee, &ad.SenderRewards, nil)
if err != nil {
return
}
switch tx.Type {
case protocol.PaymentTx:
err = tx.PaymentTxnFields.apply(tx.Header, balances, spec, &ad)
case protocol.KeyRegistrationTx:
err = tx.KeyregTxnFields.apply(tx.Header, balances, spec, &ad)
case protocol.AssetConfigTx:
err = tx.AssetConfigTxnFields.apply(tx.Header, balances, spec, &ad, ctr)
case protocol.AssetTransferTx:
err = tx.AssetTransferTxnFields.apply(tx.Header, balances, spec, &ad)
case protocol.AssetFreezeTx:
err = tx.AssetFreezeTxnFields.apply(tx.Header, balances, spec, &ad)
default:
err = fmt.Errorf("Unknown transaction type %v", tx.Type)
}
// If the protocol does not support rewards in ApplyData,
// clear them out.
if !params.RewardsInApplyData {
ad.SenderRewards = basics.MicroAlgos{}
ad.ReceiverRewards = basics.MicroAlgos{}
ad.CloseRewards = basics.MicroAlgos{}
}
return
}
// TxnContext describes the context in which a transaction can appear
// (pretty much, a block, but we don't have the definition of a block
// here, since that would be a circular dependency). This is used to
// decide if a transaction is alive or not.
type TxnContext interface {
Round() basics.Round
ConsensusProtocol() config.ConsensusParams
GenesisID() string
GenesisHash() crypto.Digest
}
// ExplicitTxnContext is a struct that implements TxnContext with
// explicit fields for everything.
type ExplicitTxnContext struct {
ExplicitRound basics.Round
Proto config.ConsensusParams
GenID string
GenHash crypto.Digest
}
// Round implements the TxnContext interface
func (tc ExplicitTxnContext) Round() basics.Round {
return tc.ExplicitRound
}
// ConsensusProtocol implements the TxnContext interface
func (tc ExplicitTxnContext) ConsensusProtocol() config.ConsensusParams {
return tc.Proto
}
// GenesisID implements the TxnContext interface
func (tc ExplicitTxnContext) GenesisID() string {
return tc.GenID
}
// GenesisHash implements the TxnContext interface
func (tc ExplicitTxnContext) GenesisHash() crypto.Digest {
return tc.GenHash
}
| 1 | 37,197 | should the 'Receiver' be the clawback address in case of clawback transaction? | algorand-go-algorand | go |
@@ -19,10 +19,16 @@ package command
import (
"errors"
"flag"
-
"github.com/spf13/cobra"
)
+const (
+ // VolumeAPIPath is the api path to get volume information
+ VolumeAPIPath = "/latest/volumes/"
+ controllerStatusOk = "running"
+ volumeStatusOK = "Running"
+)
+
var (
volumeCommandHelpText = `
The following commands helps in operating a Volume such as create, list, and so on. | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"errors"
"flag"
"github.com/spf13/cobra"
)
var (
volumeCommandHelpText = `
The following commands help with operating a Volume, such as create, list, and so on.
Usage: mayactl volume <subcommand> [options] [args]
Examples:
# Create a Volume:
$ mayactl volume create --volname <vol> --size <size>
# List Volumes:
$ mayactl volume list
# Delete a Volume:
$ mayactl volume delete --volname <vol>
# Delete a Volume created in 'test' namespace:
$ mayactl volume delete --volname <vol> --namespace test
# Statistics of a Volume:
$ mayactl volume stats --volname <vol>
# Statistics of a Volume created in 'test' namespace:
$ mayactl volume stats --volname <vol> --namespace test
# Info of a Volume:
$ mayactl volume info --volname <vol>
# Info of a Volume created in 'test' namespace:
$ mayactl volume info --volname <vol> --namespace test
`
options = &CmdVolumeOptions{
namespace: "default",
}
)
// CmdVolumeOptions stores information of volume being operated
type CmdVolumeOptions struct {
volName string
sourceVolumeName string
snapshotName string
size string
namespace string
json string
}
// NewCmdVolume provides options for managing OpenEBS Volume
func NewCmdVolume() *cobra.Command {
cmd := &cobra.Command{
Use: "volume",
Short: "Provides operations related to a Volume",
Long: volumeCommandHelpText,
}
cmd.AddCommand(
NewCmdVolumeCreate(),
NewCmdVolumesList(),
NewCmdVolumeDelete(),
NewCmdVolumeStats(),
NewCmdVolumeInfo(),
)
cmd.PersistentFlags().StringVarP(&options.namespace, "namespace", "n", options.namespace,
"namespace name, required if volume is not in the default namespace")
cmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
flag.CommandLine.Parse([]string{})
return cmd
}
// Validate verifies whether a volume name, source name, or snapshot name is
// provided for the invoked subcommand. It returns nil and proceeds to execute
// the command if there is no error, and returns an error if a required flag is missing.
func (c *CmdVolumeOptions) Validate(cmd *cobra.Command, snapshotnameverify, sourcenameverify, volnameverify bool) error {
if snapshotnameverify {
if len(c.snapshotName) == 0 {
return errors.New("--snapname is missing. Please provide a snapshotname")
}
}
if sourcenameverify {
if len(c.sourceVolumeName) == 0 {
return errors.New("--sourcevol is missing. Please specify a sourcevolumename")
}
}
if volnameverify {
if len(c.volName) == 0 {
return errors.New("--volname is missing. Please specify a unique volumename")
}
}
return nil
}
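// Illustrative usage (hypothetical caller): a snapshot-create subcommand
// that requires both --snapname and --volname, but not --sourcevol, would
// call:
//   options.Validate(cmd, true, false, true)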
| 1 | 9,140 | @ashishranjan738 -- Where are these consts used? in which pkg? | openebs-maya | go |
@@ -81,12 +81,12 @@ autodoc_default_options = {
"show-inheritance": True,
}
+# Generate autosummary pages. Output should be set with: `:toctree: pythonapi/`
+autosummary_generate = ['Python-API.rst']
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
-# Generate autosummary pages. Output should be set with: `:toctree: pythonapi/`
-autosummary_generate = True
-
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md'] | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# LightGBM documentation build configuration file, created by
# sphinx-quickstart on Thu May 4 14:30:58 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute.
"""Sphinx configuration file."""
import datetime
import os
import sys
from distutils.version import LooseVersion
import sphinx
from docutils.parsers.rst import Directive
from sphinx.errors import VersionRequirementError
from subprocess import PIPE, Popen
CURR_PATH = os.path.abspath(os.path.dirname(__file__))
LIB_PATH = os.path.join(CURR_PATH, os.path.pardir, 'python-package')
sys.path.insert(0, LIB_PATH)
# -- mock out modules
try:
from unittest.mock import Mock # Python 3.x
except ImportError:
from mock import Mock # Python 2.x
MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse',
'sklearn', 'matplotlib', 'pandas', 'graphviz']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
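# Rationale: stubbing these heavy imports lets sphinx.ext.autodoc import the
# lightgbm package on documentation builders that lack the compiled
# dependencies.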
class IgnoredDirective(Directive):
"""Stub for unknown directives."""
has_content = True
def run(self):
"""Do nothing."""
return []
# -- General configuration ------------------------------------------------
os.environ['LIGHTGBM_BUILD_DOC'] = '1'
C_API = os.environ.get('C_API', '').lower().strip() != 'no'
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3' # Due to sphinx.ext.napoleon
# compare parsed versions; plain string comparison misorders e.g. '1.10' vs '1.3'
if LooseVersion(sphinx.__version__) < LooseVersion(needs_sphinx):
message = 'This project needs at least Sphinx v%s' % needs_sphinx
raise VersionRequirementError(message)
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
autodoc_default_flags = ['members', 'inherited-members', 'show-inheritance']
autodoc_default_options = {
"members": True,
"inherited-members": True,
"show-inheritance": True,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Generate autosummary pages. Output should be set with: `:toctree: pythonapi/`
autosummary_generate = True
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'LightGBM'
copyright = '%s, Microsoft Corporation' % str(datetime.datetime.now().year)
author = 'Microsoft Corporation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
with open(os.path.join(CURR_PATH, os.path.pardir, 'VERSION.txt'), 'r') as f:
# The short X.Y version.
version = f.read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Both the class' and the __init__ method's docstring are concatenated and inserted.
autoclass_content = 'class'
# -- Configuration for C API docs generation ------------------------------
if C_API:
extensions.extend([
'breathe',
])
breathe_projects = {
"LightGBM": os.path.join(CURR_PATH, 'doxyoutput', 'xml')
}
breathe_default_project = "LightGBM"
breathe_domain_by_extension = {
"h": "c",
}
breathe_show_define_initializer = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'includehidden': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LightGBMdoc'
def generate_doxygen_xml(app):
"""Generate XML documentation for C API by Doxygen.
Parameters
----------
app : object
The application object representing the Sphinx process.
"""
doxygen_args = [
"INPUT={}".format(os.path.join(CURR_PATH, os.path.pardir,
'include', 'LightGBM', 'c_api.h')),
"OUTPUT_DIRECTORY={}".format(os.path.join(CURR_PATH, 'doxyoutput')),
"GENERATE_HTML=NO",
"GENERATE_LATEX=NO",
"GENERATE_XML=YES",
"XML_OUTPUT=xml",
"XML_PROGRAMLISTING=YES",
r'ALIASES="rst=\verbatim embed:rst:leading-asterisk"',
r'ALIASES+="endrst=\endverbatim"',
"ENABLE_PREPROCESSING=YES",
"MACRO_EXPANSION=YES",
"EXPAND_ONLY_PREDEF=NO",
"SKIP_FUNCTION_MACROS=NO",
"SORT_BRIEF_DOCS=YES",
"WARN_AS_ERROR=YES",
]
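# The ALIASES pair above lets c_api.h wrap reStructuredText between \rst and
# \endrst markers, which breathe then renders as embedded reST.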
doxygen_input = '\n'.join(doxygen_args)
is_py3 = sys.version[0] == "3"
if is_py3:
doxygen_input = bytes(doxygen_input, "utf-8")
if not os.path.exists(os.path.join(CURR_PATH, 'doxyoutput')):
os.makedirs(os.path.join(CURR_PATH, 'doxyoutput'))
try:
# Warning! The following code can cause buffer overflows on RTD.
# Consider suppressing output completely if RTD project silently fails.
# Refer to https://github.com/svenevs/exhale
# /blob/fe7644829057af622e467bb529db6c03a830da99/exhale/deploy.py#L99-L111
process = Popen(["doxygen", "-"],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate(doxygen_input)
output = '\n'.join([i.decode('utf-8') if is_py3 else i
for i in (stdout, stderr) if i is not None])
if process.returncode != 0:
raise RuntimeError(output)
else:
print(output)
except BaseException as e:
raise Exception("An error has occurred while executing Doxygen\n" + str(e))
def setup(app):
"""Add new elements at Sphinx initialization time.
Parameters
----------
app : object
The application object representing the Sphinx process.
"""
if C_API:
app.connect("builder-inited", generate_doxygen_xml)
else:
app.add_directive('doxygenfile', IgnoredDirective)
add_js_file = getattr(app, 'add_js_file', False) or app.add_javascript
add_js_file("js/script.js")
| 1 | 20,689 | The only change I would suggest is moving these lines back below the `templates_path` variable to keep the diffs smaller. | microsoft-LightGBM | cpp |
@@ -1862,8 +1862,8 @@ func (fbo *folderBranchOps) unembedBlockChanges(
md.AddRefBytes(uint64(info.EncodedSize))
md.AddDiskUsage(uint64(info.EncodedSize))
- changes.Info = info
md.data.cachedChanges = *changes
+ changes.Info = info
changes.Ops = nil
return nil
} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfssync"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// mdReqType indicates whether an operation makes MD modifications or not
type mdReqType int
const (
// A read request that doesn't need an identify to be
// performed.
mdReadNoIdentify mdReqType = iota
// A read request that needs an identify to be performed (if
// it hasn't been already).
mdReadNeedIdentify
// A write request.
mdWrite
// A rekey request. Doesn't need an identify to be performed, as
// a rekey does its own (finer-grained) identifies.
mdRekey
)
type branchType int
const (
standard branchType = iota // an online, read-write branch
archive // an online, read-only branch
offline // an offline, read-write branch
archiveOffline // an offline, read-only branch
)
// Constants used in this file. TODO: Make these configurable?
const (
// MaxBlockSizeBytesDefault is the default maximum block size for KBFS.
// 512K blocks by default, block changes embedded max == 8K.
// Block size was chosen somewhat arbitrarily by trying to
// minimize the overall size of the history written by a user when
// appending 1KB writes to a file, up to a 1GB total file. Here
// is the output of a simple script that approximates that
// calculation:
//
// Total history size for 0065536-byte blocks: 1134341128192 bytes
// Total history size for 0131072-byte blocks: 618945052672 bytes
// Total history size for 0262144-byte blocks: 412786622464 bytes
// Total history size for 0524288-byte blocks: 412786622464 bytes
// Total history size for 1048576-byte blocks: 618945052672 bytes
// Total history size for 2097152-byte blocks: 1134341128192 bytes
// Total history size for 4194304-byte blocks: 2216672886784 bytes
MaxBlockSizeBytesDefault = 512 << 10
// Maximum number of blocks that can be sent in parallel
maxParallelBlockPuts = 100
// Maximum number of blocks that can be fetched in parallel
maxParallelBlockGets = 10
// Max response size for a single DynamoDB query is 1MB.
maxMDsAtATime = 10
// Time between checks for dirty files to flush, in case Sync is
// never called.
secondsBetweenBackgroundFlushes = 10
// Cap the number of times we retry after a recoverable error
maxRetriesOnRecoverableErrors = 10
// When the number of dirty bytes exceeds this level, force a sync.
dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault
// The timeout for any background task.
backgroundTaskTimeout = 1 * time.Minute
// If it's been more than this long since our last update, check
// the current head before downloading all of the new revisions.
fastForwardTimeThresh = 15 * time.Minute
// If there are more than this many new revisions, fast forward
// rather than downloading them all.
fastForwardRevThresh = 50
)
type fboMutexLevel mutexLevel
const (
fboMDWriter fboMutexLevel = 1
fboHead fboMutexLevel = 2
fboBlock fboMutexLevel = 3
)
func (o fboMutexLevel) String() string {
switch o {
case fboMDWriter:
return "mdWriterLock"
case fboHead:
return "headLock"
case fboBlock:
return "blockLock"
default:
return fmt.Sprintf("Invalid fboMutexLevel %d", int(o))
}
}
func fboMutexLevelToString(o mutexLevel) string {
return (fboMutexLevel(o)).String()
}
// Rules for working with lockState in FBO:
//
// - Every "execution flow" (i.e., program flow that happens
// sequentially) needs its own lockState object. This usually means
// that each "public" FBO method does:
//
// lState := makeFBOLockState()
//
// near the top.
//
// - Plumb lState through to all functions that hold any of the
// relevant locks, or are called under those locks.
//
// This way, violations of the lock hierarchy will be detected at
// runtime.
func makeFBOLockState() *lockState {
return makeLevelState(fboMutexLevelToString)
}
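// Illustrative sketch only (doWorkLocked is a hypothetical helper,
// not part of the real API): the plumb-through pattern described
// above looks roughly like this.
//
//	func (fbo *folderBranchOps) publicMethod(ctx context.Context) error {
//		lState := makeFBOLockState()
//		return fbo.doWorkLocked(ctx, lState)
//	}
//
//	func (fbo *folderBranchOps) doWorkLocked(
//		ctx context.Context, lState *lockState) error {
//		fbo.mdWriterLock.Lock(lState)
//		defer fbo.mdWriterLock.Unlock(lState)
//		// MD modifications go here; any lock-hierarchy violation
//		// is detected at runtime via lState.
//		return nil
//	}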
// blockLock is just like a sync.RWMutex, but with an extra operation
// (DoRUnlockedIfPossible).
type blockLock struct {
leveledRWMutex
locked bool
}
func (bl *blockLock) Lock(lState *lockState) {
bl.leveledRWMutex.Lock(lState)
bl.locked = true
}
func (bl *blockLock) Unlock(lState *lockState) {
bl.locked = false
bl.leveledRWMutex.Unlock(lState)
}
// DoRUnlockedIfPossible must be called when r- or w-locked. If
// r-locked, r-unlocks, runs the given function, and r-locks after
// it's done. Otherwise, just runs the given function.
func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) {
if !bl.locked {
bl.RUnlock(lState)
defer bl.RLock(lState)
}
f(lState)
}
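// Usage sketch (fetchBlockFromServer is hypothetical, for
// illustration only): a caller that may be either r-locked or
// w-locked can drop a read lock around slow work, such as a network
// block fetch, without tracking which of the two locks it holds.
//
//	fbo.blocks.blockLock.DoRUnlockedIfPossible(lState,
//		func(lState *lockState) {
//			// If we were r-locked, the lock is released here, so
//			// the slow fetch doesn't stall other readers/writers.
//			block, err = fetchBlockFromServer(ctx, ptr)
//		})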
// folderBranchOps implements the KBFSOps interface for a specific
// branch of a specific folder. It is go-routine safe for operations
// within the folder.
//
// We use locks to protect against multiple goroutines accessing the
// same folder-branch. The goal with our locking strategy is to maximize
// concurrent access whenever possible. See design/state_machine.md
// for more details. There are three important locks:
//
// 1) mdWriterLock: Any "remote-sync" operation (one which modifies the
// folder's metadata) must take this lock during the entirety of
// its operation, to avoid forking the MD.
//
// 2) headLock: This is a read/write mutex. It must be taken for
// reading before accessing any part of the current head MD. It
// should be taken for the shortest time possible -- that means in
// general that it should be taken, and the MD copied to a
// goroutine-local variable, and then it can be released.
// Remote-sync operations should take it for writing after pushing
// all of the blocks and MD to the KBFS servers (i.e., all network
// accesses), and then hold it until after all notifications have
// been fired, to ensure that no concurrent "local" operations ever
// see inconsistent state locally.
//
// 3) blockLock: This too is a read/write mutex. It must be taken for
// reading before accessing any blocks in the block cache that
// belong to this folder/branch. This includes checking their
// dirty status. It should be taken for the shortest time possible
// -- that means in general it should be taken, and then the blocks
// that will be modified should be copied to local variables in the
// goroutine, and then it should be released. The blocks should
// then be modified locally, and then readied and pushed out
// remotely. Only after the blocks have been pushed to the server
// should a remote-sync operation take the lock again (this time
// for writing) and put/finalize the blocks. Write and Truncate
// should take blockLock for their entire lifetime, since they
// don't involve writes over the network. Furthermore, if a block
// is not in the cache and needs to be fetched, we should release
// the mutex before doing the network operation, and lock it again
// before writing the block back to the cache.
//
// We want to allow writes and truncates to a file that's currently
// being sync'd, like any good networked file system. The tricky part
// is making sure the changes can both: a) be read while the sync is
// happening, and b) be applied to the new file path after the sync is
// done.
//
// For now, we just do the dumb, brute-force thing: if a block
// is currently being sync'd, it copies the block and puts it back
// into the cache as modified. Then, when the sync finishes, it
// throws away the modified blocks and re-applies the change to the
// new file path (which might have a completely different set of
// blocks, so we can't just reuse the blocks that were modified during
// the sync.)
type folderBranchOps struct {
config Config
folderBranch FolderBranch
bid BranchID // protected by mdWriterLock
bType branchType
observers *observerList
// these locks, when locked concurrently by the same goroutine,
// should only be taken in the following order to avoid deadlock:
mdWriterLock leveledMutex // taken by any method making MD modifications
// protects access to head and latestMergedRevision.
headLock leveledRWMutex
head ImmutableRootMetadata
// latestMergedRevision tracks the latest heard merged revision on server
latestMergedRevision MetadataRevision
blocks folderBlockOps
// nodeCache itself is goroutine-safe, but this object's use
// of it has special requirements:
//
// - Reads can call PathFromNode() unlocked, since there are
// no guarantees with concurrent reads.
//
	// - Operations that take mdWriterLock always need the
// most up-to-date paths, so those must call
// PathFromNode() under mdWriterLock.
//
// - Block write operations (write/truncate/sync) need to
// coordinate. Specifically, sync must make sure that
// blocks referenced in a path (including all of the child
// blocks) must exist in the cache during calls to
// PathFromNode from write/truncate. This means that sync
// must modify dirty file blocks only under blockLock, and
// write/truncate must call PathFromNode() under
// blockLock.
//
// Furthermore, calls to UpdatePointer() must happen
// before the copy-on-write mode induced by Sync() is
// finished.
nodeCache NodeCache
// Whether we've identified this TLF or not.
identifyLock sync.Mutex
identifyDone bool
identifyTime time.Time
// The current status summary for this folder
status *folderBranchStatusKeeper
// How to log
log logger.Logger
deferLog logger.Logger
// Closed on shutdown
shutdownChan chan struct{}
// Can be used to turn off notifications for a while (e.g., for testing)
updatePauseChan chan (<-chan struct{})
// After a shutdown, this channel will be closed when the register
// goroutine completes.
updateDoneChan chan struct{}
// forceSyncChan is read from by the background sync process
// to know when it should sync immediately.
forceSyncChan <-chan struct{}
// How to resolve conflicts
cr *ConflictResolver
// Helper class for archiving and cleaning up the blocks for this TLF
fbm *folderBlockManager
// rekeyWithPromptTimer tracks a timed function that will try to
// rekey with a paper key prompt, if enough time has passed.
// Protected by mdWriterLock
rekeyWithPromptTimer *time.Timer
editHistory *TlfEditHistory
branchChanges kbfssync.RepeatedWaitGroup
mdFlushes kbfssync.RepeatedWaitGroup
}
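// Lock-ordering sketch (illustration only, heavily elided): a typical
// remote-sync operation takes the locks above in the documented
// order, finishing all network access before touching headLock.
//
//	lState := makeFBOLockState()
//	fbo.mdWriterLock.Lock(lState) // (1) serialize MD writers
//	// ... ready dirty blocks under blockLock, release it, then put
//	// all blocks and the new MD to the servers ...
//	fbo.headLock.Lock(lState) // (2) swap in the new head locally
//	// ... set fbo.head and fire notifications ...
//	fbo.headLock.Unlock(lState)
//	fbo.mdWriterLock.Unlock(lState)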
var _ KBFSOps = (*folderBranchOps)(nil)
var _ fbmHelper = (*folderBranchOps)(nil)
// newFolderBranchOps constructs a new folderBranchOps object.
func newFolderBranchOps(config Config, fb FolderBranch,
bType branchType) *folderBranchOps {
nodeCache := newNodeCacheStandard(fb)
// make logger
branchSuffix := ""
if fb.Branch != MasterBranch {
branchSuffix = " " + string(fb.Branch)
}
tlfStringFull := fb.Tlf.String()
// Shorten the TLF ID for the module name. 8 characters should be
// unique enough for a local node.
log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8],
branchSuffix))
// But print it out once in full, just in case.
log.CInfof(nil, "Created new folder-branch for %s", tlfStringFull)
observers := newObserverList()
mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{})
headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{})
blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{})
forceSyncChan := make(chan struct{})
fbo := &folderBranchOps{
config: config,
folderBranch: fb,
bid: BranchID{},
bType: bType,
observers: observers,
status: newFolderBranchStatusKeeper(config, nodeCache),
mdWriterLock: mdWriterLock,
headLock: headLock,
blocks: folderBlockOps{
config: config,
log: log,
folderBranch: fb,
observers: observers,
forceSyncChan: forceSyncChan,
blockLock: blockLock{
leveledRWMutex: blockLockMu,
},
dirtyFiles: make(map[BlockPointer]*dirtyFile),
unrefCache: make(map[BlockRef]*syncInfo),
deCache: make(map[BlockRef]DirEntry),
nodeCache: nodeCache,
},
nodeCache: nodeCache,
log: log,
deferLog: log.CloneWithAddedDepth(1),
shutdownChan: make(chan struct{}),
updatePauseChan: make(chan (<-chan struct{})),
forceSyncChan: forceSyncChan,
}
fbo.cr = NewConflictResolver(config, fbo)
fbo.fbm = newFolderBlockManager(config, fb, fbo)
fbo.editHistory = NewTlfEditHistory(config, fbo, log)
if config.DoBackgroundFlushes() {
go fbo.backgroundFlusher(secondsBetweenBackgroundFlushes * time.Second)
}
return fbo
}
// markForReIdentifyIfNeeded checks whether this tlf is identified and mark
// it for lazy reidentification if it exceeds time limits.
func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) {
fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime)
fbo.identifyDone = false
}
}
// Shutdown safely shuts down any background goroutines that may have
// been launched by folderBranchOps.
func (fbo *folderBranchOps) Shutdown(ctx context.Context) error {
if fbo.config.CheckStateOnShutdown() {
lState := makeFBOLockState()
if fbo.blocks.GetState(lState) == dirtyState {
fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state")
} else if !fbo.isMasterBranch(lState) {
fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged")
} else {
// Make sure we're up to date first
if err := fbo.SyncFromServerForTesting(ctx, fbo.folderBranch); err != nil {
return err
}
// Check the state for consistency before shutting down.
sc := NewStateChecker(fbo.config)
if err := sc.CheckMergedState(ctx, fbo.id()); err != nil {
return err
}
}
}
close(fbo.shutdownChan)
fbo.cr.Shutdown()
fbo.fbm.shutdown()
fbo.editHistory.Shutdown()
// Wait for the update goroutine to finish, so that we don't have
// any races with logging during test reporting.
if fbo.updateDoneChan != nil {
<-fbo.updateDoneChan
}
return nil
}
func (fbo *folderBranchOps) id() tlf.ID {
return fbo.folderBranch.Tlf
}
func (fbo *folderBranchOps) branch() BranchName {
return fbo.folderBranch.Branch
}
func (fbo *folderBranchOps) GetFavorites(ctx context.Context) (
[]Favorite, error) {
return nil, errors.New("GetFavorites is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) {
// no-op
}
func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("DeleteFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) AddFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("AddFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) addToFavorites(ctx context.Context,
favorites *Favorites, created bool) (err error) {
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head == (ImmutableRootMetadata{}) {
return OpsCantHandleFavorite{"Can't add a favorite without a handle"}
}
return fbo.addToFavoritesByHandle(ctx, favorites, head.GetTlfHandle(), created)
}
func (fbo *folderBranchOps) addToFavoritesByHandle(ctx context.Context,
favorites *Favorites, handle *TlfHandle, created bool) (err error) {
if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil {
// Can't favorite while not logged in
return nil
}
favorites.AddAsync(ctx, handle.toFavToAdd(created))
return nil
}
func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context,
favorites *Favorites) error {
if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil {
// Can't unfavorite while not logged in
return nil
}
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head == (ImmutableRootMetadata{}) {
// This can happen when identifies fail and the head is never set.
return OpsCantHandleFavorite{"Can't delete a favorite without a handle"}
}
h := head.GetTlfHandle()
return favorites.Delete(ctx, h.ToFavorite())
}
// getHead should not be called outside of folder_branch_ops.go.
func (fbo *folderBranchOps) getHead(lState *lockState) ImmutableRootMetadata {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.head
}
// isMasterBranch should not be called if mdWriterLock is already taken.
func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid == NullBranchID
}
func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.bid == NullBranchID
}
func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid BranchID) {
fbo.mdWriterLock.AssertLocked(lState)
fbo.bid = bid
if bid == NullBranchID {
fbo.status.setCRSummary(nil, nil)
}
}
var errNoFlushedRevisions = errors.New("No flushed MDs yet")
// getJournalPredecessorRevision returns the revision that precedes
// the current journal head if journaling enabled and there are
// unflushed MD updates; otherwise it returns
// MetadataRevisionUninitialized. If there aren't any flushed MD
// revisions, it returns errNoFlushedRevisions.
func (fbo *folderBranchOps) getJournalPredecessorRevision(ctx context.Context) (
MetadataRevision, error) {
jServer, err := GetJournalServer(fbo.config)
if err != nil {
// Journaling is disabled entirely.
return MetadataRevisionUninitialized, nil
}
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
// Journaling is disabled for this TLF, so use the local head.
// TODO: JournalStatus could return other errors (likely
// file/disk corruption) that indicate a real problem, so it
// might be nice to type those errors so we can distinguish
// them.
return MetadataRevisionUninitialized, nil
}
if jStatus.BranchID != NullBranchID.String() {
return MetadataRevisionUninitialized,
errors.New("Cannot find most recent merged revision while staged")
}
if jStatus.RevisionStart == MetadataRevisionUninitialized {
// The journal is empty, so the local head must be the most recent.
return MetadataRevisionUninitialized, nil
} else if jStatus.RevisionStart == MetadataRevisionInitial {
// Nothing has been flushed to the servers yet, so don't
// return anything.
return MetadataRevisionUninitialized, errNoFlushedRevisions
}
return jStatus.RevisionStart - 1, nil
}
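// Worked example (assumed values, for illustration): if this device's
// journal holds unflushed merged revisions 42 through 45,
// JournalStatus reports RevisionStart == 42, so the newest revision
// the server has actually seen is 42 - 1 == 41, which is what this
// function returns.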
func (fbo *folderBranchOps) setHeadLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
isFirstHead := fbo.head == ImmutableRootMetadata{}
wasReadable := false
if !isFirstHead {
wasReadable = fbo.head.IsReadable()
if fbo.head.mdID == md.mdID {
panic(errors.Errorf("Re-putting the same MD: %s", md.mdID))
}
}
fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision())
err := fbo.config.MDCache().Put(md)
if err != nil {
return err
}
// If this is the first time the MD is being set, and we are
// operating on unmerged data, initialize the state properly and
// kick off conflict resolution.
if isFirstHead && md.MergedStatus() == Unmerged {
fbo.setBranchIDLocked(lState, md.BID())
// Use uninitialized for the merged branch; the unmerged
// revision is enough to trigger conflict resolution.
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
} else if md.MergedStatus() == Merged {
journalEnabled := TLFJournalEnabled(fbo.config, fbo.id())
var key kbfscrypto.VerifyingKey
if journalEnabled {
if isFirstHead {
// If journaling is on, and this is the first head
// we're setting, we have to make sure we use the
// server's notion of the latest MD, not the one
// potentially coming from our journal. If there are
// no flushed revisions, it's not a hard error, and we
// just leave the latest merged revision
// uninitialized.
journalPred, err := fbo.getJournalPredecessorRevision(ctx)
switch err {
case nil:
// journalPred will be
// MetadataRevisionUninitialized when the journal
// is empty.
if journalPred >= MetadataRevisionInitial {
fbo.setLatestMergedRevisionLocked(
ctx, lState, journalPred, false)
} else {
fbo.setLatestMergedRevisionLocked(ctx, lState,
md.Revision(), false)
}
case errNoFlushedRevisions:
// The server has no revisions, so leave the
// latest merged revision uninitialized.
default:
return err
}
} else {
// If this isn't the first head, then this is either
// an update from the server, or an update just
// written by the client. But since journaling is on,
				// the latter case will be handled by onMDFlush when
// the update is properly flushed to the server. So
// ignore updates written by this device.
key, err = fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
if key != md.LastModifyingWriterVerifyingKey() {
fbo.setLatestMergedRevisionLocked(
ctx, lState, md.Revision(), false)
}
}
} else {
// This is a merged revision, and journaling is disabled,
// so it's definitely the latest revision on the server as
// well.
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
}
}
// Make sure that any unembedded block changes have been swapped
// back in.
if md.data.Changes.Info.BlockPointer != zeroPtr &&
len(md.data.Changes.Ops) == 0 {
return errors.New("Must swap in block changes before setting head")
}
fbo.head = md
fbo.status.setRootMetadata(md)
if isFirstHead {
// Start registering for updates right away, using this MD
// as a starting point. For now only the master branch can
// get updates
if fbo.branch() == MasterBranch {
fbo.updateDoneChan = make(chan struct{})
go fbo.registerAndWaitForUpdates()
}
}
if !wasReadable && md.IsReadable() {
// Let any listeners know that this folder is now readable,
// which may indicate that a rekey successfully took place.
fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification(
md.GetTlfHandle(), md.TlfID().IsPublic()))
}
return nil
}
// setInitialHeadUntrustedLocked is for when the given RootMetadata
// was fetched not due to a user action, i.e. via a Rekey
// notification, and we don't have a TLF name to check against.
func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setNewInitialHeadLocked is for when we're creating a brand-new TLF.
func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setNewInitialHeadLocked")
}
if md.Revision() != MetadataRevisionInitial {
return errors.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", md.Revision())
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setInitialHeadTrustedLocked is for when the given RootMetadata
// was fetched due to a user action, and will be checked against the
// TLF name.
func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setHeadSuccessorLocked is for when we're applying updates from the
// server or when we're applying new updates we created ourselves.
func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata, rebased bool) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
// This can happen in tests via SyncFromServerForTesting().
return fbo.setInitialHeadTrustedLocked(ctx, lState, md)
}
if !rebased {
err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly())
if err != nil {
return err
}
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// Newer handles should be equal or more resolved over time.
//
// TODO: In some cases, they shouldn't, e.g. if we're on an
// unmerged branch. Add checks for this.
resolvesTo, partialResolvedOldHandle, err :=
oldHandle.ResolvesTo(
ctx, fbo.config.Codec(), fbo.config.KBPKI(),
*newHandle)
if err != nil {
return err
}
oldName := oldHandle.GetCanonicalName()
newName := newHandle.GetCanonicalName()
if !resolvesTo {
return IncompatibleHandleError{
oldName,
partialResolvedOldHandle.GetCanonicalName(),
newName,
}
}
err = fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return err
}
if oldName != newName {
fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)",
oldName, newName)
// If the handle has changed, send out a notification.
fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle())
// Also the folder should be re-identified given the
// newly-resolved assertions.
func() {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
fbo.identifyDone = false
}()
}
return nil
}
// setHeadPredecessorLocked is for when we're unstaging updates.
func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return errors.New("Unexpected nil head in setHeadPredecessorLocked")
}
if fbo.head.Revision() <= MetadataRevisionInitial {
return errors.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision())
}
if fbo.head.MergedStatus() != Unmerged {
return errors.New("Unexpected merged head in setHeadPredecessorLocked")
}
err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly())
if err != nil {
return err
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// The two handles must be the same, since no rekeying is done
// while unmerged.
eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle)
if err != nil {
return err
}
if !eq {
return errors.Errorf(
"head handle %v unexpectedly not equal to new handle = %v",
oldHandle, newHandle)
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setHeadConflictResolvedLocked is for when we're setting the merged
// update with resolved conflicts.
func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head.MergedStatus() != Unmerged {
return errors.New("Unexpected merged head in setHeadConflictResolvedLocked")
}
if md.MergedStatus() != Merged {
return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked")
}
return fbo.setHeadLocked(ctx, lState, md)
}
func (fbo *folderBranchOps) identifyOnce(
ctx context.Context, md ReadOnlyRootMetadata) error {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
ei := getExtendedIdentify(ctx)
if fbo.identifyDone && !ei.behavior.AlwaysRunIdentify() {
// TODO: provide a way for the service to break this cache when identify
// state changes on a TLF. For now, we do it this way to make chat work.
return nil
}
h := md.GetTlfHandle()
fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath())
kbpki := fbo.config.KBPKI()
err := identifyHandle(ctx, kbpki, kbpki, h)
if err != nil {
fbo.log.CDebugf(ctx, "Identify finished with error: %v", err)
// For now, if the identify fails, let the
// next function to hit this code path retry.
return err
}
if ei.behavior.WarningInsteadOfErrorOnBrokenTracks() &&
len(ei.getTlfBreakAndClose().Breaks) > 0 {
fbo.log.CDebugf(ctx,
"Identify finished with no error but broken proof warnings")
} else {
fbo.log.CDebugf(ctx, "Identify finished successfully")
fbo.identifyDone = true
fbo.identifyTime = fbo.config.Clock().Now()
}
return nil
}
// If rtype == mdWrite or mdRekey, then mdWriterLock must be taken
func (fbo *folderBranchOps) getMDLocked(
ctx context.Context, lState *lockState, rtype mdReqType) (
md ImmutableRootMetadata, err error) {
defer func() {
if err != nil || rtype == mdReadNoIdentify || rtype == mdRekey {
return
}
err = fbo.identifyOnce(ctx, md.ReadOnly())
}()
md = fbo.getHead(lState)
if md != (ImmutableRootMetadata{}) {
return md, nil
}
// Unless we're in mdWrite or mdRekey mode, we can't safely fetch
// the new MD without causing races, so bail.
if rtype != mdWrite && rtype != mdRekey {
return ImmutableRootMetadata{}, MDWriteNeededInRequest{}
}
// We go down this code path either due to a rekey
// notification for an unseen TLF, or in some tests.
//
// TODO: Make tests not take this code path, and keep track of
// the fact that MDs coming from rekey notifications are
// untrusted.
fbo.mdWriterLock.AssertLocked(lState)
// Not in cache, fetch from server and add to cache. First, see
// if this device has any unmerged commits -- take the latest one.
mdops := fbo.config.MDOps()
// get the head of the unmerged branch for this device (if any)
md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), NullBranchID)
if err != nil {
return ImmutableRootMetadata{}, err
}
mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedMD == (ImmutableRootMetadata{}) {
return ImmutableRootMetadata{}, errors.Errorf(
"Got nil RMD for %s", fbo.id())
}
if md == (ImmutableRootMetadata{}) {
// There are no unmerged MDs for this device, so just use the current head.
md = mergedMD
} else {
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// We don't need to do this for merged head
// because the setHeadLocked() already does
// that anyway.
fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false)
}()
}
if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) {
return ImmutableRootMetadata{}, errors.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable())
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setInitialHeadUntrustedLocked(ctx, lState, md)
if err != nil {
return ImmutableRootMetadata{}, err
}
return md, nil
}
func (fbo *folderBranchOps) getMDForReadHelper(
ctx context.Context, lState *lockState, rtype mdReqType) (ImmutableRootMetadata, error) {
md, err := fbo.getMDLocked(ctx, lState, rtype)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.TlfID().IsPublic() {
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.GetTlfHandle().IsReader(uid) {
return ImmutableRootMetadata{}, NewReadAccessError(md.GetTlfHandle(), username, md.GetTlfHandle().GetCanonicalPath())
}
}
return md, nil
}
// getMostRecentFullyMergedMD is a helper method that returns the most
// recent merged MD that has been flushed to the server. This could
// be different from the current local head if journaling is on. If
// the journal is on a branch, it returns an error.
func (fbo *folderBranchOps) getMostRecentFullyMergedMD(ctx context.Context) (
ImmutableRootMetadata, error) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedRev == MetadataRevisionUninitialized {
// No unflushed journal entries, so use the local head.
lState := makeFBOLockState()
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
// Otherwise, use the specified revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
mergedRev, Merged)
if err != nil {
return ImmutableRootMetadata{}, err
}
fbo.log.CDebugf(ctx, "Most recent fully merged revision is %d", mergedRev)
return rmd, nil
}
func (fbo *folderBranchOps) getMDForReadNoIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
func (fbo *folderBranchOps) getMDForReadNeedIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
}
// getMDForReadNeedIdentifyOnMaybeFirstAccess should be called by a
// code path (like chat) that might be accessing this folder for the
// first time. Other folderBranchOps methods like Lookup which know
// the folder has already been accessed at least once (to get the root
// node, for example) do not need to call this.
func (fbo *folderBranchOps) getMDForReadNeedIdentifyOnMaybeFirstAccess(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
irmd, err := fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
if _, ok := err.(MDWriteNeededInRequest); ok {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
irmd, err = fbo.getMDForReadHelper(ctx, lState, mdWrite)
}
return irmd, err
}
// getMDForWriteLocked returns a new RootMetadata object with an
// incremented version number for modification. If the returned object
// is put to the MDServer (via MDOps), mdWriterLock must be held until
// then. (See comments for mdWriterLock above.)
func (fbo *folderBranchOps) getMDForWriteLocked(
ctx context.Context, lState *lockState) (*RootMetadata, error) {
return fbo.getMDForWriteLockedForFilename(ctx, lState, "")
}
func (fbo *folderBranchOps) getMDForWriteLockedForFilename(
ctx context.Context, lState *lockState, filename string) (*RootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDLocked(ctx, lState, mdWrite)
if err != nil {
return nil, err
}
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, err
}
if !md.GetTlfHandle().IsWriter(uid) {
return nil, NewWriteAccessError(md.GetTlfHandle(), username, filename)
}
// Make a new successor of the current MD to hold the coming
// writes. The caller must pass this into
// syncBlockAndCheckEmbedLocked or the changes will be lost.
newMd, err := md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
fbo.config.Codec(), fbo.config.Crypto(),
fbo.config.KeyManager(), md.mdID, true)
if err != nil {
return nil, err
}
return newMd, nil
}
func (fbo *folderBranchOps) getMDForRekeyWriteLocked(
ctx context.Context, lState *lockState) (
rmd *RootMetadata, lastWriterVerifyingKey kbfscrypto.VerifyingKey,
wasRekeySet bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDLocked(ctx, lState, mdRekey)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
handle := md.GetTlfHandle()
	// Must be a reader or writer; IsReader also returns true for writers.
if !handle.IsReader(uid) {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(md.GetTlfHandle(), username)
}
newMd, err := md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
fbo.config.Codec(), fbo.config.Crypto(),
fbo.config.KeyManager(), md.mdID, handle.IsWriter(uid))
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
// readers shouldn't modify writer metadata
if !handle.IsWriter(uid) && !newMd.IsWriterMetadataCopiedSet() {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(handle, username)
}
return newMd, md.LastModifyingWriterVerifyingKey(), md.IsRekeySet(), nil
}
func (fbo *folderBranchOps) nowUnixNano() int64 {
return fbo.config.Clock().Now().UnixNano()
}
func (fbo *folderBranchOps) maybeUnembedAndPutBlocks(ctx context.Context,
md *RootMetadata) (*blockPutState, error) {
if fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) {
return nil, nil
}
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, err
}
bps := newBlockPutState(1)
err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes, uid)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return nil, err
}
if len(ptrsToDelete) > 0 {
return nil, errors.Errorf("Unexpected pointers to delete after "+
"unembedding block changes in gc op: %v", ptrsToDelete)
}
return bps, nil
}
// ResetRootBlock creates a new empty dir block and sets the given
// metadata's root block to it.
func ResetRootBlock(ctx context.Context, config Config,
currentUID keybase1.UID, rmd *RootMetadata) (
Block, BlockInfo, ReadyBlockData, error) {
newDblock := NewDirBlock()
info, plainSize, readyBlockData, err :=
ReadyBlock(ctx, config.BlockCache(), config.BlockOps(),
config.Crypto(), rmd.ReadOnly(), newDblock, currentUID)
if err != nil {
return nil, BlockInfo{}, ReadyBlockData{}, err
}
now := config.Clock().Now().UnixNano()
rmd.data.Dir = DirEntry{
BlockInfo: info,
EntryInfo: EntryInfo{
Type: Dir,
Size: uint64(plainSize),
Mtime: now,
Ctime: now,
},
}
prevDiskUsage := rmd.DiskUsage()
rmd.SetDiskUsage(0)
// Redundant, since this is called only for brand-new or
// successor RMDs, but leave in to be defensive.
rmd.ClearBlockChanges()
co := newCreateOpForRootDir()
rmd.AddOp(co)
rmd.AddRefBlock(rmd.data.Dir.BlockInfo)
// Set unref bytes to the previous disk usage, so that the
// accounting works out.
rmd.AddUnrefBytes(prevDiskUsage)
return newDblock, info, readyBlockData, nil
}
func (fbo *folderBranchOps) initMDLocked(
ctx context.Context, lState *lockState, md *RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return err
}
handle := md.GetTlfHandle()
// make sure we're a writer before rekeying or putting any blocks.
if !handle.IsWriter(uid) {
return NewWriteAccessError(handle, username, handle.GetCanonicalPath())
}
var expectedKeyGen KeyGen
var tlfCryptKey *kbfscrypto.TLFCryptKey
if md.TlfID().IsPublic() {
expectedKeyGen = PublicKeyGen
} else {
var rekeyDone bool
// create a new set of keys for this metadata
rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false)
if err != nil {
return err
}
if !rekeyDone {
return errors.Errorf("Initial rekey unexpectedly not done for "+
"private TLF %v", md.TlfID())
}
expectedKeyGen = FirstValidKeyGen
}
keyGen := md.LatestKeyGeneration()
if keyGen != expectedKeyGen {
return InvalidKeyGenerationError{md.TlfID(), keyGen}
}
// create a dblock since one doesn't exist yet
newDblock, info, readyBlockData, err :=
ResetRootBlock(ctx, fbo.config, uid, md)
if err != nil {
return err
}
// Some other thread got here first, so give up and let it go
// before we push anything to the servers.
if fbo.getHead(lState) != (ImmutableRootMetadata{}) {
fbo.log.CDebugf(ctx, "Head was already set, aborting")
return nil
}
if err = PutBlockCheckQuota(ctx, fbo.config.BlockServer(),
fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData,
md.GetTlfHandle().GetCanonicalName()); err != nil {
return err
}
if err = fbo.config.BlockCache().Put(
info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil {
return err
}
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
// finally, write out the new metadata
mdID, err := fbo.config.MDOps().Put(ctx, md)
if err != nil {
return err
}
md.loadCachedBlockChanges(ctx, bps)
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.Errorf(
"%v: Unexpected MD ID during new MD initialization: %v",
md.TlfID(), fbo.head.mdID)
}
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
	err = fbo.setNewInitialHeadLocked(ctx, lState, MakeImmutableRootMetadata(
		md, key, mdID, fbo.config.Clock().Now()))
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
return nil
}
func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context,
h *TlfHandle) (keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) {
return nil, tlf.ID{}, errors.New("GetTLFCryptKeys is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetTLFID(ctx context.Context, h *TlfHandle) (tlf.ID, error) {
return tlf.ID{}, errors.New("GetTLFID is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetOrCreateRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) checkNode(node Node) error {
fb := node.GetFolderBranch()
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return nil
}
// SetInitialHeadFromServer sets the head to the given
// ImmutableRootMetadata, which must be retrieved from the MD server.
func (fbo *folderBranchOps) SetInitialHeadFromServer(
ctx context.Context, md ImmutableRootMetadata) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)",
md.Revision(), md.MergedStatus())
defer func() {
fbo.deferLog.CDebugf(ctx,
"SetInitialHeadFromServer, revision=%d (%s) done: %+v",
md.Revision(), md.MergedStatus(), err)
}()
if md.data.Dir.Type != Dir {
// Not initialized.
return errors.Errorf("MD with revision=%d not initialized",
md.Revision())
}
// We will prefetch this as on-demand so that it triggers downstream
// prefetches.
fbo.config.BlockOps().Prefetcher().PrefetchBlock(
&DirBlock{}, md.data.Dir.BlockPointer, md, defaultOnDemandRequestPriority)
// Return early if the head is already set. This avoids taking
// mdWriterLock for no reason, and it also avoids any side effects
// (e.g., calling `identifyOnce` and downloading the merged
// head) if head is already set.
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head != (ImmutableRootMetadata{}) && head.mdID == md.mdID {
fbo.log.CDebugf(ctx, "Head MD already set to revision %d (%s), no "+
"need to set initial head again", md.Revision(), md.MergedStatus())
return nil
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{md.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, md.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if md.MergedStatus() == Unmerged {
mdops := fbo.config.MDOps()
mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
if err != nil {
return err
}
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState,
mergedMD.Revision(), false)
}()
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Only update the head the first time; later it will be
// updated either directly via writes or through the
// background update processor.
if fbo.head == (ImmutableRootMetadata{}) {
err = fbo.setInitialHeadTrustedLocked(ctx, lState, md)
if err != nil {
return err
}
}
return nil
})
}
// SetInitialHeadToNew creates a brand-new ImmutableRootMetadata
// object and sets the head to that.
func (fbo *folderBranchOps) SetInitialHeadToNew(
ctx context.Context, id tlf.ID, handle *TlfHandle) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadToNew %s", id)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetInitialHeadToNew %s done: %+v",
id, err)
}()
rmd, err := makeInitialRootMetadata(
fbo.config.MetadataVersion(), id, handle)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{rmd.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, rmd.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.initMDLocked(ctx, lState, rmd)
})
}
// execMDReadNoIdentifyThenMDWrite first tries to execute the
// passed-in method in mdReadNoIdentify mode. If it fails with an
// MDWriteNeededInRequest error, it re-executes the method as in
// mdWrite mode. The passed-in method must note whether or not this
// is an mdWrite call.
//
// This must only be used by getRootNode().
func (fbo *folderBranchOps) execMDReadNoIdentifyThenMDWrite(
lState *lockState, f func(*lockState, mdReqType) error) error {
err := f(lState, mdReadNoIdentify)
// Redo as an MD write request if needed
if _, ok := err.(MDWriteNeededInRequest); ok {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
err = f(lState, mdWrite)
}
return err
}
func getNodeIDStr(n Node) string {
if n == nil {
return "NodeID(nil)"
}
return fmt.Sprintf("NodeID(%v)", n.GetID())
}
func (fbo *folderBranchOps) getRootNode(ctx context.Context) (
node Node, ei EntryInfo, handle *TlfHandle, err error) {
fbo.log.CDebugf(ctx, "getRootNode")
defer func() {
fbo.deferLog.CDebugf(ctx, "getRootNode done: %s %+v",
getNodeIDStr(node), err)
}()
lState := makeFBOLockState()
var md ImmutableRootMetadata
err = fbo.execMDReadNoIdentifyThenMDWrite(lState,
func(lState *lockState, rtype mdReqType) error {
md, err = fbo.getMDLocked(ctx, lState, rtype)
return err
})
if err != nil {
return nil, EntryInfo{}, nil, err
}
// we may be an unkeyed client
if err := isReadableOrError(ctx, fbo.config.KBPKI(), md.ReadOnly()); err != nil {
return nil, EntryInfo{}, nil, err
}
handle = md.GetTlfHandle()
node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer,
string(handle.GetCanonicalName()), nil)
if err != nil {
return nil, EntryInfo{}, nil, err
}
return node, md.Data().Dir.EntryInfo, handle, nil
}
type makeNewBlock func() Block
// pathFromNodeHelper() shouldn't be called except by the helper
// functions below.
func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) {
p := fbo.nodeCache.PathFromNode(n)
if !p.isValid() {
return path{}, InvalidPathError{p}
}
return p, nil
}
// Helper functions to clarify uses of pathFromNodeHelper() (see
// nodeCache comments).
func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) {
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked(
lState *lockState, n Node) (path, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) (
children map[string]EntryInfo, err error) {
fbo.log.CDebugf(ctx, "GetDirChildren %s", getNodeIDStr(dir))
defer func() {
fbo.deferLog.CDebugf(ctx, "GetDirChildren %s done: %+v",
getNodeIDStr(dir), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, err
}
err = runUnlessCanceled(ctx, func() error {
var err error
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node
// has been unlinked. Probably we have fast-forwarded, and
// missed all the updates deleting the children in this
// directory. In that case, just return an empty set of
// children so we don't return an incorrect set from the
// cache.
if md.data.Dir.BlockPointer.ID != dirPath.path[0].BlockPointer.ID {
fbo.log.CDebugf(ctx, "Returning an empty children set for "+
"unlinked directory %v", dirPath.tailPointer())
return nil
}
children, err = fbo.blocks.GetDirtyDirChildren(
ctx, lState, md.ReadOnly(), dirPath)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
return children, nil
}
func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) (
node Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Lookup %s %s", getNodeIDStr(dir), name)
defer func() {
fbo.deferLog.CDebugf(ctx, "Lookup %s %s done: %v %+v",
getNodeIDStr(dir), name, getNodeIDStr(node), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
node, de, err = fbo.blocks.Lookup(ctx, lState, md.ReadOnly(), dir, name)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, EntryInfo{}, err
}
return node, de.EntryInfo, nil
}
// statEntry is like Stat, but it returns a DirEntry. This is used by
// tests.
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) (
de DirEntry, err error) {
err = fbo.checkNode(node)
if err != nil {
return DirEntry{}, err
}
lState := makeFBOLockState()
nodePath, err := fbo.pathFromNodeForRead(node)
if err != nil {
return DirEntry{}, err
}
var md ImmutableRootMetadata
if nodePath.hasValidParent() {
md, err = fbo.getMDForReadNeedIdentify(ctx, lState)
} else {
// If nodePath has no valid parent, it's just the TLF
// root, so we don't need an identify in this case.
md, err = fbo.getMDForReadNoIdentify(ctx, lState)
}
if err != nil {
return DirEntry{}, err
}
if nodePath.hasValidParent() {
de, err = fbo.blocks.GetDirtyEntry(
ctx, lState, md.ReadOnly(), nodePath)
if err != nil {
return DirEntry{}, err
}
} else {
// nodePath is just the root.
de = md.data.Dir
}
return de, nil
}
var zeroPtr BlockPointer
type blockState struct {
blockPtr BlockPointer
block Block
readyBlockData ReadyBlockData
syncedCb func() error
}
func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Stat %s", getNodeIDStr(node))
defer func() {
fbo.deferLog.CDebugf(ctx, "Stat %s done: %+v",
getNodeIDStr(node), err)
}()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
if err != nil {
return EntryInfo{}, err
}
return de.EntryInfo, nil
}
func (fbo *folderBranchOps) GetNodeMetadata(ctx context.Context, node Node) (
ei NodeMetadata, err error) {
fbo.log.CDebugf(ctx, "GetNodeMetadata %s", getNodeIDStr(node))
defer func() {
fbo.deferLog.CDebugf(ctx, "GetNodeMetadata %s done: %+v",
getNodeIDStr(node), err)
}()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
var res NodeMetadata
if err != nil {
return res, err
}
res.BlockInfo = de.BlockInfo
uid := de.Writer
if uid == keybase1.UID("") {
uid = de.Creator
}
res.LastWriterUnverified, err =
fbo.config.KBPKI().GetNormalizedUsername(ctx, uid)
if err != nil {
return res, err
}
return res, nil
}
// blockPutState is an internal structure to track data when putting blocks
type blockPutState struct {
blockStates []blockState
}
func newBlockPutState(length int) *blockPutState {
bps := &blockPutState{}
bps.blockStates = make([]blockState, 0, length)
return bps
}
// addNewBlock tracks a new block that will be put. If syncedCb is
// non-nil, it will be called whenever the put for that block is
// complete (whether or not the put resulted in an error). Currently
// it will not be called if the block is never put (due to an earlier
// error).
func (bps *blockPutState) addNewBlock(blockPtr BlockPointer, block Block,
readyBlockData ReadyBlockData, syncedCb func() error) {
bps.blockStates = append(bps.blockStates,
blockState{blockPtr, block, readyBlockData, syncedCb})
}
func (bps *blockPutState) mergeOtherBps(other *blockPutState) {
bps.blockStates = append(bps.blockStates, other.blockStates...)
}
func (bps *blockPutState) DeepCopy() *blockPutState {
newBps := &blockPutState{}
newBps.blockStates = make([]blockState, len(bps.blockStates))
copy(newBps.blockStates, bps.blockStates)
return newBps
}
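// Usage sketch (the pointers, blocks, and ready data are
// hypothetical, for illustration only):
//
//	bps := newBlockPutState(2)
//	bps.addNewBlock(ptr1, block1, rbd1, nil) // no completion callback
//	bps.addNewBlock(ptr2, block2, rbd2, func() error {
//		// Called once the put of ptr2 completes, even if it errored.
//		return nil
//	})
//	bps.mergeOtherBps(otherBps) // fold in blocks from another state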
func (fbo *folderBranchOps) readyBlockMultiple(ctx context.Context,
kmd KeyMetadata, currBlock Block, uid keybase1.UID,
bps *blockPutState) (info BlockInfo, plainSize int, err error) {
info, plainSize, readyBlockData, err :=
ReadyBlock(ctx, fbo.config.BlockCache(), fbo.config.BlockOps(),
fbo.config.Crypto(), kmd, currBlock, uid)
if err != nil {
return
}
bps.addNewBlock(info.BlockPointer, currBlock, readyBlockData, nil)
return
}
func (fbo *folderBranchOps) unembedBlockChanges(
ctx context.Context, bps *blockPutState, md *RootMetadata,
changes *BlockChanges, uid keybase1.UID) error {
buf, err := fbo.config.Codec().Encode(changes)
if err != nil {
return err
}
// Treat the block change list as a file so we can reuse all the
// indirection code in fileData.
block := NewFileBlock().(*FileBlock)
bid, err := fbo.config.Crypto().MakeTemporaryBlockID()
if err != nil {
return err
}
ptr := BlockPointer{
ID: bid,
KeyGen: md.LatestKeyGeneration(),
DataVer: fbo.config.DataVersion(),
DirectType: DirectBlock,
Context: kbfsblock.Context{
Creator: uid,
RefNonce: kbfsblock.ZeroRefNonce,
},
}
file := path{fbo.folderBranch,
[]pathNode{{ptr, fmt.Sprintf("<MD rev %d>", md.Revision())}}}
dirtyBcache := simpleDirtyBlockCacheStandard()
// Simple dirty bcaches don't need to be shut down.
getter := func(_ context.Context, _ KeyMetadata, ptr BlockPointer,
_ path, _ blockReqType) (*FileBlock, bool, error) {
block, err := dirtyBcache.Get(fbo.id(), ptr, fbo.branch())
if err != nil {
return nil, false, err
}
fblock, ok := block.(*FileBlock)
if !ok {
return nil, false,
errors.Errorf("Block for %s is not a file block", ptr)
}
return fblock, true, nil
}
cacher := func(ptr BlockPointer, block Block) error {
return dirtyBcache.Put(fbo.id(), ptr, fbo.branch(), block)
}
// Start off the cache with the new block
err = cacher(ptr, block)
if err != nil {
return err
}
df := newDirtyFile(file, dirtyBcache)
fd := newFileData(file, uid, fbo.config.Crypto(),
fbo.config.BlockSplitter(), md.ReadOnly(), getter, cacher, fbo.log)
// Write all the data.
_, _, _, _, _, err = fd.write(ctx, buf, 0, block, DirEntry{}, df)
if err != nil {
return err
}
// There might be a new top block.
topBlock, err := dirtyBcache.Get(fbo.id(), ptr, fbo.branch())
if err != nil {
return err
}
block, ok := topBlock.(*FileBlock)
if !ok {
return errors.New("Top block change block no longer a file block")
}
// Ready all the child blocks.
infos, err := fd.ready(ctx, fbo.id(), fbo.config.BlockCache(),
dirtyBcache, fbo.config.BlockOps(), bps, block, df)
if err != nil {
return err
}
for info := range infos {
md.AddRefBytes(uint64(info.EncodedSize))
md.AddDiskUsage(uint64(info.EncodedSize))
}
fbo.log.CDebugf(ctx, "%d unembedded child blocks", len(infos))
// Ready the top block.
info, _, err := fbo.readyBlockMultiple(
ctx, md.ReadOnly(), block, uid, bps)
if err != nil {
return err
}
md.AddRefBytes(uint64(info.EncodedSize))
md.AddDiskUsage(uint64(info.EncodedSize))
changes.Info = info
md.data.cachedChanges = *changes
changes.Ops = nil
return nil
}
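// localBcache carries directory blocks dirtied by earlier syncBlock
// calls (or by the FS operation itself), keyed by their pre-sync
// block pointers, so that later syncBlock calls in the same larger
// operation see those updates before anything is written to the
// server.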
type localBcache map[BlockPointer]*DirBlock
// syncBlock updates, and readies, the blocks along the path for the
// given write, up to the root of the tree or stopAt (if specified).
// When it updates the root of the tree, it also modifies the given
// head object with a new revision number and root block ID. It first
// checks the provided lbc for blocks that may have been modified by
// previous syncBlock calls or the FS calls themselves. It returns
// the updated path to the changed directory, the new or updated
// directory entry created as part of the call, and a summary of all
// the blocks that now must be put to the block server.
//
// This function is safe to use unlocked, but may modify MD to have
// the same revision number as another one. All functions in this file
// must call syncBlockLocked instead, which holds mdWriterLock and
// thus serializes the revision numbers. Conflict resolution may call
// syncBlockForConflictResolution, which doesn't hold the lock, since
// it already handles conflicts correctly.
//
// entryType must not be Sym.
//
// TODO: deal with multiple nodes for indirect blocks
func (fbo *folderBranchOps) syncBlock(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
// now ready each dblock and write the DirEntry for the next one
// in the path
currBlock := newBlock
currName := name
newPath := path{
FolderBranch: dir.FolderBranch,
path: make([]pathNode, 0, len(dir.path)),
}
bps := newBlockPutState(len(dir.path))
refPath := dir.ChildPathNoPtr(name)
var newDe DirEntry
doSetTime := true
now := fbo.nowUnixNano()
for len(newPath.path) < len(dir.path)+1 {
info, plainSize, err := fbo.readyBlockMultiple(
ctx, md.ReadOnly(), currBlock, uid, bps)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// prepend to path and setup next one
newPath.path = append([]pathNode{{info.BlockPointer, currName}},
newPath.path...)
// get the parent block
prevIdx := len(dir.path) - len(newPath.path)
var prevDblock *DirBlock
var de DirEntry
var nextName string
nextDoSetTime := false
if prevIdx < 0 {
// root dir, update the MD instead
de = md.data.Dir
} else {
prevDir := path{
FolderBranch: dir.FolderBranch,
path: dir.path[:prevIdx+1],
}
// First, check the localBcache, which could contain
// blocks that were modified across multiple calls to
// syncBlock.
var ok bool
prevDblock, ok = lbc[prevDir.tailPointer()]
if !ok {
// If the block isn't in the local bcache, we
// have to fetch it, possibly from the
// network. Directory blocks are only ever
// modified while holding mdWriterLock, so it's
// safe to fetch them one at a time.
prevDblock, err = fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(),
prevDir, blockWrite)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
// modify the direntry for currName; make one
// if it doesn't exist (which should only
// happen the first time around).
//
// TODO: Pull the creation out of here and
// into createEntryLocked().
if de, ok = prevDblock.Children[currName]; !ok {
// If this isn't the first time
// around, we have an error.
if len(newPath.path) > 1 {
return path{}, DirEntry{}, nil, NoSuchNameError{currName}
}
// If this is a file, the size should be 0. (TODO:
// Ensure this.) If this is a directory, the size will
// be filled in below. The times will be filled in
// below as well, since we should only be creating a
// new directory entry when doSetTime is true.
de = DirEntry{
EntryInfo: EntryInfo{
Type: entryType,
Size: 0,
},
}
// If we're creating a new directory entry, the
// parent's times must be set as well.
nextDoSetTime = true
}
currBlock = prevDblock
nextName = prevDir.tailName()
}
if de.Type == Dir {
// TODO: When we use indirect dir blocks,
// we'll have to calculate the size some other
// way.
de.Size = uint64(plainSize)
}
if prevIdx < 0 {
md.AddUpdate(md.data.Dir.BlockInfo, info)
} else if prevDe, ok := prevDblock.Children[currName]; ok {
md.AddUpdate(prevDe.BlockInfo, info)
} else {
// this is a new block
md.AddRefBlock(info)
}
if len(refPath.path) > 1 {
refPath = *refPath.parentPath()
}
de.BlockInfo = info
if doSetTime {
if mtime {
de.Mtime = now
}
if ctime {
de.Ctime = now
}
}
if !newDe.IsInitialized() {
newDe = de
}
if prevIdx < 0 {
md.data.Dir = de
} else {
prevDblock.Children[currName] = de
}
currName = nextName
// Stop before we get to the common ancestor; it will be taken care of
// on the next sync call
if prevIdx >= 0 && dir.path[prevIdx].BlockPointer == stopAt {
// Put this back into the cache as dirty -- the next
// syncBlock call will ready it.
dblock, ok := currBlock.(*DirBlock)
if !ok {
return path{}, DirEntry{}, nil, BadDataError{stopAt.ID}
}
lbc[stopAt] = dblock
break
}
doSetTime = nextDoSetTime
}
return newPath, newDe, bps, nil
}
// syncBlockLocked calls syncBlock under mdWriterLock.
func (fbo *folderBranchOps) syncBlockLocked(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.syncBlock(ctx, lState, uid, md, newBlock, dir, name,
entryType, mtime, ctime, stopAt, lbc)
}
// syncBlockForConflictResolution calls syncBlock unlocked, since
// conflict resolution can handle MD revision number conflicts
// correctly.
func (fbo *folderBranchOps) syncBlockForConflictResolution(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
return fbo.syncBlock(
ctx, lState, uid, md, newBlock, dir,
name, entryType, mtime, ctime, stopAt, lbc)
}
// entryType must not be Sym.
func (fbo *folderBranchOps) syncBlockAndCheckEmbedLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer, lbc localBcache) (
path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return path{}, DirEntry{}, nil, err
}
newPath, newDe, bps, err := fbo.syncBlockLocked(
ctx, lState, uid, md, newBlock, dir, name, entryType, mtime,
ctime, stopAt, lbc)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// Do the block changes need their own blocks? Unembed only if
// this is the final call to this function with this MD.
if stopAt == zeroPtr {
bsplit := fbo.config.BlockSplitter()
if !bsplit.ShouldEmbedBlockChanges(&md.data.Changes) {
err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes,
uid)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
}
return newPath, newDe, bps, nil
}
// Returns whether the given error is one that shouldn't block the
// removal of a file or directory.
//
// TODO: Consider other errors recoverable, e.g. ones that arise from
// present but corrupted blocks?
func isRecoverableBlockErrorForRemoval(err error) bool {
return isRecoverableBlockError(err)
}
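// isRetriableError returns whether err is worth retrying: a
// recoverable block error or a resolvable conflict error, as long as
// retries hasn't yet reached maxRetriesOnRecoverableErrors.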
func isRetriableError(err error, retries int) bool {
_, isExclOnUnmergedError := err.(ExclOnUnmergedError)
_, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError)
recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError ||
isRecoverableBlockError(err)
return recoverable && retries < maxRetriesOnRecoverableErrors
}
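// finalizeBlocks caches each brand-new block in the given
// blockPutState as a transient entry; blocks that were merely
// incref'd are skipped.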
func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error {
if bps == nil {
return nil
}
bcache := fbo.config.BlockCache()
for _, blockState := range bps.blockStates {
newPtr := blockState.blockPtr
// only cache this block if we made a brand new block, not if
// we just incref'd some other block.
if !newPtr.IsFirstRef() {
continue
}
if err := bcache.Put(newPtr, fbo.id(), blockState.block,
TransientEntry); err != nil {
return err
}
}
return nil
}
// Returns true if the passed error indicates a revision conflict.
func isRevisionConflict(err error) bool {
if err == nil {
return false
}
_, isConflictRevision := err.(MDServerErrorConflictRevision)
_, isConflictPrevRoot := err.(MDServerErrorConflictPrevRoot)
_, isConflictDiskUsage := err.(MDServerErrorConflictDiskUsage)
_, isConditionFailed := err.(MDServerErrorConditionFailed)
_, isConflictFolderMapping := err.(MDServerErrorConflictFolderMapping)
_, isJournal := err.(MDJournalConflictError)
return isConflictRevision || isConflictPrevRoot ||
isConflictDiskUsage || isConditionFailed ||
isConflictFolderMapping || isJournal
}
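// finalizeMDWriteLocked writes out the given MD, either merged or
// (on a revision conflict) unmerged, finalizes its blocks, and
// installs it as the new head, notifying observers of the batch.
// mdWriterLock must be held.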
func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// finally, write out the new metadata
mdops := fbo.config.MDOps()
doUnmergedPut := true
mergedRev := MetadataRevisionUninitialized
oldPrevRoot := md.PrevRoot()
var mdID MdID
// This puts on a delay on any cancellations arriving to ctx. It is intended
// to work sort of like a critical section, except that there isn't an
// explicit call to exit the critical section. The cancellation, if any, is
// triggered after a timeout (i.e.
// fbo.config.DelayedCancellationGracePeriod()).
//
// The purpose of trying to avoid cancellation once we start MD write is to
// avoid having an unpredictable perceived MD state. That is, when
// runUnlessCanceled returns Canceled on cancellation, application receives
// an EINTR, and would assume the operation didn't succeed. But the MD write
// continues, and there's a chance the write will succeed, meaning the
// operation succeeds. This contradicts with the application's perception
// through error code and can lead to horrible situations. An easily caught
// situation is when application calls Create with O_EXCL set, gets an EINTR
// while MD write succeeds, retries and gets an EEXIST error. If users hit
// Ctrl-C, this might not be a big deal. However, it also happens for other
// interrupts. For applications that use signals to communicate, e.g.
	// SIGALRM and SIGUSR1, this can happen pretty often, which can leave them
	// broken.
if err = EnableDelayedCancellationWithGracePeriod(
ctx, fbo.config.DelayedCancellationGracePeriod()); err != nil {
return err
}
// we don't explicitly clean up (by using a defer) CancellationDelayer here
// because sometimes fuse makes another call using the same ctx. For example, in
// fuse's Create call handler, a dir.Create is followed by an Attr call. If
// we do a deferred cleanup here, if an interrupt has been received, it can
// cause ctx to be canceled before Attr call finishes, which causes FUSE to
// return EINTR for the Create request. But at this point, the request may
// have already succeeded. Returning EINTR makes application thinks the file
// is not created successfully.
if fbo.isMasterBranchLocked(lState) {
// only do a normal Put if we're not already staged.
mdID, err = mdops.Put(ctx, md)
if doUnmergedPut = isRevisionConflict(err); doUnmergedPut {
fbo.log.CDebugf(ctx, "Conflict: %v", err)
mergedRev = md.Revision()
if excl == WithExcl {
// If this was caused by an exclusive create, we shouldn't do an
// UnmergedPut, but rather try to get newest update from server, and
// retry afterwards.
err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
return ExclOnUnmergedError{}
}
} else if err != nil {
return err
}
} else if excl == WithExcl {
return ExclOnUnmergedError{}
}
if doUnmergedPut {
// We're out of date, and this is not an exclusive write, so put it as an
// unmerged MD.
mdID, err = mdops.PutUnmerged(ctx, md)
if isRevisionConflict(err) {
// Self-conflicts are retried in `doMDWriteWithRetry`.
err = UnmergedSelfConflictError{err}
}
if err != nil {
return err
}
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), mergedRev)
} else {
fbo.setBranchIDLocked(lState, NullBranchID)
if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() {
// Queue this folder for rekey if the bit was set and it's not a copy.
// This is for the case where we're coming out of conflict resolution.
// So why don't we do this in finalizeResolution? Well, we do but we don't
// want to block on a rekey so we queue it. Because of that it may fail
// due to a conflict with some subsequent write. By also handling it here
// we'll always retry if we notice we haven't been successful in clearing
// the bit yet. Note that I haven't actually seen this happen but it seems
// theoretically possible.
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
}
md.loadCachedBlockChanges(ctx, bps)
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
irmd := MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now())
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
// Archive the old, unref'd blocks if journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
fbo.notifyBatchLocked(ctx, lState, irmd)
return nil
}
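// waitForJournalLocked waits for the TLF journal, if enabled, to
// flush completely, returning an error if any revisions remain
// unflushed or if the last flush failed.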
func (fbo *folderBranchOps) waitForJournalLocked(ctx context.Context,
lState *lockState, jServer *JournalServer) error {
fbo.mdWriterLock.AssertLocked(lState)
if !TLFJournalEnabled(fbo.config, fbo.id()) {
// Nothing to do.
return nil
}
if err := jServer.Wait(ctx, fbo.id()); err != nil {
return err
}
	// Make sure everything flushed successfully; since we're holding
// the writer lock, no other revisions could have snuck in.
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
return err
}
if jStatus.RevisionEnd != MetadataRevisionUninitialized {
return errors.Errorf("Couldn't flush all MD revisions; current "+
"revision end for the journal is %d", jStatus.RevisionEnd)
}
if jStatus.LastFlushErr != "" {
return errors.Errorf("Couldn't flush the journal: %s",
jStatus.LastFlushErr)
}
return nil
}
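// finalizeMDRekeyWriteLocked writes out a rekey MD update, bypassing
// the journal (after flushing it) so the rekey can't land on a
// conflict branch, and installs the result as the new head.
// mdWriterLock must be held.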
func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata,
lastWriterVerifyingKey kbfscrypto.VerifyingKey) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
oldPrevRoot := md.PrevRoot()
// Write out the new metadata. If journaling is enabled, we don't
// want the rekey to hit the journal and possibly end up on a
// conflict branch, so wait for the journal to flush and then push
// straight to the server. TODO: we're holding the writer lock
// while flushing the journal here (just like for exclusive
// writes), which may end up blocking incoming writes for a long
// time. Rekeys are pretty rare, but if this becomes an issue
// maybe we should consider letting these hit the journal and
// scrubbing them when converting it to a branch.
mdOps := fbo.config.MDOps()
if jServer, err := GetJournalServer(fbo.config); err == nil {
if err = fbo.waitForJournalLocked(ctx, lState, jServer); err != nil {
return err
}
mdOps = jServer.delegateMDOps
}
mdID, err := mdOps.Put(ctx, md)
isConflict := isRevisionConflict(err)
if err != nil && !isConflict {
return err
}
if isConflict {
// Drop this block. We've probably collided with someone also
// trying to rekey the same folder but that's not necessarily
// the case. We'll queue another rekey just in case. It should
// be safe as it's idempotent. We don't want any rekeys present
		// in unmerged history, as that would just make a mess.
fbo.config.RekeyQueue().Enqueue(md.TlfID())
return RekeyConflictError{err}
}
fbo.setBranchIDLocked(lState, NullBranchID)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
md.loadCachedBlockChanges(ctx, nil)
var key kbfscrypto.VerifyingKey
if md.IsWriterMetadataCopiedSet() {
key = lastWriterVerifyingKey
} else {
var err error
key, err = fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState,
MakeImmutableRootMetadata(md, key, mdID, fbo.config.Clock().Now()),
rebased)
if err != nil {
return err
}
// Explicitly set the latest merged revision, since if journaling
// is on, `setHeadLocked` will not do it for us (even though
// rekeys bypass the journal).
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
return nil
}
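// finalizeGCOp writes out a merged MD update containing the given
// garbage-collection op, finalizing and caching its blocks; it takes
// mdWriterLock itself.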
func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *GCOp) (
err error) {
lState := makeFBOLockState()
// Lock the folder so we can get an internally-consistent MD
// revision number.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
if md.MergedStatus() == Unmerged {
return UnexpectedUnmergedPutError{}
}
md.AddOp(gco)
md.SetLastGCRevision(gco.LatestRev)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
oldPrevRoot := md.PrevRoot()
// finally, write out the new metadata
mdID, err := fbo.config.MDOps().Put(ctx, md)
if err != nil {
// Don't allow garbage collection to put us into a conflicting
// state; just wait for the next period.
return err
}
fbo.setBranchIDLocked(lState, NullBranchID)
md.loadCachedBlockChanges(ctx, bps)
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
irmd := MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now())
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
fbo.notifyBatchLocked(ctx, lState, irmd)
return nil
}
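// syncBlockAndFinalizeLocked readies newBlock and all of its parent
// directory blocks, puts the resulting blocks to the server, and
// finalizes the MD write, returning the new DirEntry for name.
// mdWriterLock must be held.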
func (fbo *folderBranchOps) syncBlockAndFinalizeLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer, excl Excl) (de DirEntry, err error) {
fbo.mdWriterLock.AssertLocked(lState)
_, de, bps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newBlock, dir, name, entryType, mtime,
ctime, zeroPtr, nil)
if err != nil {
return DirEntry{}, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(
md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
_, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return DirEntry{}, err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl)
if err != nil {
return DirEntry{}, err
}
return de, nil
}
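// checkDisallowedPrefixes returns a DisallowedPrefixError if name
// starts with one of the reserved prefixes.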
func checkDisallowedPrefixes(name string) error {
for _, prefix := range disallowedPrefixes {
if strings.HasPrefix(name, prefix) {
return DisallowedPrefixError{name, prefix}
}
}
return nil
}
func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context,
lState *lockState, md ReadOnlyRootMetadata,
dirPath path, newName string) error {
// Check that the directory isn't past capacity already.
var currSize uint64
if dirPath.hasValidParent() {
de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath)
if err != nil {
return err
}
currSize = de.Size
} else {
// dirPath is just the root.
currSize = md.data.Dir.Size
}
// Just an approximation since it doesn't include the size of the
// directory entry itself, but that's ok -- at worst it'll be an
// off-by-one-entry error, and since there's a maximum name length
// we can't get in too much trouble.
if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() {
return DirTooBigError{dirPath, currSize + uint64(len(newName)),
fbo.config.MaxDirBytes()}
}
return nil
}
// PathType returns the PathType (public or private) of this folder.
func (fbo *folderBranchOps) PathType() PathType {
if fbo.folderBranch.Tlf.IsPublic() {
return PublicPathType
}
return PrivatePathType
}
// canonicalPath returns full canonical path for dir node and name.
func (fbo *folderBranchOps) canonicalPath(ctx context.Context, dir Node, name string) (string, error) {
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return "", err
}
return BuildCanonicalPath(fbo.PathType(), dirPath.String(), name), nil
}
// entryType must not be Sym.
func (fbo *folderBranchOps) createEntryLocked(
ctx context.Context, lState *lockState, dir Node, name string,
entryType EntryType, excl Excl) (Node, DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(name); err != nil {
return nil, DirEntry{}, err
}
if uint32(len(name)) > fbo.config.MaxNameBytes() {
return nil, DirEntry{},
NameTooLongError{name, fbo.config.MaxNameBytes()}
}
filename, err := fbo.canonicalPath(ctx, dir, name)
if err != nil {
return nil, DirEntry{}, err
}
// verify we have permission to write
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
if err != nil {
return nil, DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return nil, DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockWrite)
if err != nil {
return nil, DirEntry{}, err
}
// does name already exist?
if _, ok := dblock.Children[name]; ok {
return nil, DirEntry{}, NameExistsError{name}
}
if err := fbo.checkNewDirSize(
ctx, lState, md.ReadOnly(), dirPath, name); err != nil {
return nil, DirEntry{}, err
}
co, err := newCreateOp(name, dirPath.tailPointer(), entryType)
if err != nil {
return nil, DirEntry{}, err
}
md.AddOp(co)
// create new data block
var newBlock Block
// XXX: for now, put a unique ID in every new block, to make sure it
// has a unique block ID. This may not be needed once we have encryption.
if entryType == Dir {
newBlock = &DirBlock{
Children: make(map[string]DirEntry),
}
} else {
newBlock = &FileBlock{}
}
de, err := fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, newBlock, dirPath, name, entryType,
true, true, zeroPtr, excl)
if err != nil {
return nil, DirEntry{}, err
}
node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
if err != nil {
return nil, DirEntry{}, err
}
return node, de, nil
}
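// doMDWriteWithRetry runs fn under mdWriterLock, releasing the lock
// and retrying whenever fn fails with a retriable error (waiting out
// conflict resolution or force-syncing the unmerged head as needed).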
func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context,
lState *lockState, fn func(lState *lockState) error) error {
doUnlock := false
defer func() {
if doUnlock {
fbo.mdWriterLock.Unlock(lState)
}
}()
for i := 0; ; i++ {
fbo.mdWriterLock.Lock(lState)
doUnlock = true
// Make sure we haven't been canceled before doing anything
// too serious.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
err := fn(lState)
if isRetriableError(err, i) {
fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err)
// Release the lock to give someone else a chance
doUnlock = false
fbo.mdWriterLock.Unlock(lState)
if _, ok := err.(ExclOnUnmergedError); ok {
if err = fbo.cr.Wait(ctx); err != nil {
return err
}
} else if _, ok := err.(UnmergedSelfConflictError); ok {
// We can only get here if we are already on an
// unmerged branch and an errored PutUnmerged did make
// it to the mdserver. Let's force sync, with a fresh
// context so the observer doesn't ignore the updates
// (but tie the cancels together).
newCtx := fbo.ctxWithFBOID(context.Background())
newCtx, cancel := context.WithCancel(newCtx)
defer cancel()
go func() {
select {
case <-ctx.Done():
cancel()
case <-newCtx.Done():
}
}()
fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+
"(%v); forcing a sync", err)
err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState)
if err != nil {
return err
}
cancel()
}
continue
} else if err != nil {
return err
}
return nil
}
}
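// doMDWriteWithRetryUnlessCanceled is doMDWriteWithRetry with a
// fresh lock state, returning early if ctx is canceled.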
func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled(
ctx context.Context, fn func(lState *lockState) error) error {
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
return fbo.doMDWriteWithRetry(ctx, lState, fn)
})
}
func (fbo *folderBranchOps) CreateDir(
ctx context.Context, dir Node, path string) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateDir %s %s", getNodeIDStr(dir), path)
defer func() {
fbo.deferLog.CDebugf(ctx, "CreateDir %s %s done: %v %+v",
getNodeIDStr(dir), path, getNodeIDStr(n), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl)
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
func (fbo *folderBranchOps) CreateFile(
ctx context.Context, dir Node, path string, isExec bool, excl Excl) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateFile %s %s isExec=%v Excl=%s",
getNodeIDStr(dir), path, isExec, excl)
defer func() {
fbo.deferLog.CDebugf(ctx,
"CreateFile %s %s isExec=%v Excl=%s done: %v %+v",
getNodeIDStr(dir), path, isExec, excl,
getNodeIDStr(n), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var entryType EntryType
if isExec {
entryType = Exec
} else {
entryType = File
}
// If journaling is turned on, an exclusive create may end up on a
// conflict branch.
if excl == WithExcl && TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.log.CDebugf(ctx, "Exclusive create status is being discarded.")
excl = NoExcl
}
if excl == WithExcl {
if err = fbo.cr.Wait(ctx); err != nil {
return nil, EntryInfo{}, err
}
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl)
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
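// createLinkLocked creates a new symlink named fromName in dir,
// pointing at toPath, and syncs the parent directory. mdWriterLock
// must be held.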
func (fbo *folderBranchOps) createLinkLocked(
ctx context.Context, lState *lockState, dir Node, fromName string,
toPath string) (DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(fromName); err != nil {
return DirEntry{}, err
}
if uint32(len(fromName)) > fbo.config.MaxNameBytes() {
return DirEntry{},
NameTooLongError{fromName, fbo.config.MaxNameBytes()}
}
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockWrite)
if err != nil {
return DirEntry{}, err
}
// TODO: validate inputs
// does name already exist?
if _, ok := dblock.Children[fromName]; ok {
return DirEntry{}, NameExistsError{fromName}
}
if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(),
dirPath, fromName); err != nil {
return DirEntry{}, err
}
co, err := newCreateOp(fromName, dirPath.tailPointer(), Sym)
if err != nil {
return DirEntry{}, err
}
md.AddOp(co)
// Create a direntry for the link, and then sync
now := fbo.nowUnixNano()
dblock.Children[fromName] = DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
Size: uint64(len(toPath)),
SymPath: toPath,
Mtime: now,
Ctime: now,
},
}
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *dirPath.parentPath(),
dirPath.tailName(), Dir, true, true, zeroPtr, NoExcl)
if err != nil {
return DirEntry{}, err
}
return dblock.Children[fromName], nil
}
func (fbo *folderBranchOps) CreateLink(
ctx context.Context, dir Node, fromName string, toPath string) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateLink %s %s -> %s",
getNodeIDStr(dir), fromName, toPath)
defer func() {
fbo.deferLog.CDebugf(ctx, "CreateLink %s %s -> %s done: %+v",
getNodeIDStr(dir), fromName, toPath, err)
}()
err = fbo.checkNode(dir)
if err != nil {
return EntryInfo{}, err
}
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set ei directly, as that can cause a race when
// the Create is canceled.
de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath)
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return EntryInfo{}, err
}
return retEntryInfo, nil
}
// unrefEntry modifies md to unreference all relevant blocks for the
// given entry.
func (fbo *folderBranchOps) unrefEntry(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, de DirEntry,
name string) error {
md.AddUnrefBlock(de.BlockInfo)
// construct a path for the child so we can unlink with it.
childPath := dir.ChildPath(name, de.BlockPointer)
// If this is an indirect block, we need to delete all of its
// children as well. NOTE: non-empty directories can't be
// removed, so no need to check for indirect directory blocks
// here.
if de.Type == File || de.Type == Exec {
blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos(
ctx, lState, md.ReadOnly(), childPath)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
}
for _, blockInfo := range blockInfos {
md.AddUnrefBlock(blockInfo)
}
}
return nil
}
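// removeEntryLocked unreferences the named entry's blocks, unlinks
// it from dir, and syncs the parent directory. mdWriterLock must be
// held.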
func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, name string) error {
fbo.mdWriterLock.AssertLocked(lState)
pblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dir, blockWrite)
if err != nil {
return err
}
// make sure the entry exists
de, ok := pblock.Children[name]
if !ok {
return NoSuchNameError{name}
}
ro, err := newRmOp(name, dir.tailPointer())
if err != nil {
return err
}
md.AddOp(ro)
err = fbo.unrefEntry(ctx, lState, md, dir, de, name)
if err != nil {
return err
}
// the actual unlink
delete(pblock.Children, name)
// sync the parent directory
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, pblock, *dir.parentPath(), dir.tailName(),
Dir, true, true, zeroPtr, NoExcl)
if err != nil {
return err
}
return nil
}
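// removeDirLocked removes the named subdirectory of dir, failing
// with DirNotEmptyError if it still has children. mdWriterLock must
// be held.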
func (fbo *folderBranchOps) removeDirLocked(ctx context.Context,
lState *lockState, dir Node, dirName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
	pblock, err := fbo.blocks.GetDir(
		ctx, lState, md.ReadOnly(), dirPath, blockRead)
	if err != nil {
		return err
	}
de, ok := pblock.Children[dirName]
if !ok {
return NoSuchNameError{dirName}
}
// construct a path for the child so we can check for an empty dir
childPath := dirPath.ChildPath(dirName, de.BlockPointer)
childBlock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), childPath, blockRead)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
} else if len(childBlock.Children) > 0 {
return DirNotEmptyError{dirName}
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, dirName)
}
func (fbo *folderBranchOps) RemoveDir(
ctx context.Context, dir Node, dirName string) (err error) {
fbo.log.CDebugf(ctx, "RemoveDir %s %s", getNodeIDStr(dir), dirName)
defer func() {
fbo.deferLog.CDebugf(ctx, "RemoveDir %s %s done: %+v",
getNodeIDStr(dir), dirName, err)
}()
err = fbo.checkNode(dir)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.removeDirLocked(ctx, lState, dir, dirName)
})
}
func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node,
name string) (err error) {
fbo.log.CDebugf(ctx, "RemoveEntry %s %s", getNodeIDStr(dir), name)
defer func() {
fbo.deferLog.CDebugf(ctx, "RemoveEntry %s %s done: %+v",
getNodeIDStr(dir), name, err)
}()
err = fbo.checkNode(dir)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, name)
})
}
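// renameLocked moves oldName in oldParent to newName in newParent,
// syncing both paths up to their common ancestor in a single MD
// write. mdWriterLock must be held.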
func (fbo *folderBranchOps) renameLocked(
ctx context.Context, lState *lockState, oldParent path,
oldName string, newParent path, newName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
oldPBlock, newPBlock, newDe, lbc, err := fbo.blocks.PrepRename(
ctx, lState, md, oldParent, oldName, newParent, newName)
if err != nil {
return err
}
// does name exist?
if de, ok := newPBlock.Children[newName]; ok {
// Usually higher-level programs check these, but just in case.
if de.Type == Dir && newDe.Type != Dir {
return NotDirError{newParent.ChildPathNoPtr(newName)}
} else if de.Type != Dir && newDe.Type == Dir {
return NotFileError{newParent.ChildPathNoPtr(newName)}
}
if de.Type == Dir {
// The directory must be empty.
oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState,
md.ReadOnly(), de.BlockPointer, newParent.Branch,
newParent.ChildPathNoPtr(newName))
if err != nil {
return err
}
if len(oldTargetDir.Children) != 0 {
				fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+
					"(%s/%s) is not allowed.", newParent, newName)
return DirNotEmptyError{newName}
}
}
// Delete the old block pointed to by this direntry.
err := fbo.unrefEntry(ctx, lState, md, newParent, de, newName)
if err != nil {
return err
}
}
// only the ctime changes
newDe.Ctime = fbo.nowUnixNano()
newPBlock.Children[newName] = newDe
delete(oldPBlock.Children, oldName)
// find the common ancestor
var i int
found := false
// the root block will always be the same, so start at number 1
for i = 1; i < len(oldParent.path) && i < len(newParent.path); i++ {
if oldParent.path[i].ID != newParent.path[i].ID {
found = true
i--
break
}
}
if !found {
// if we couldn't find one, then the common ancestor is the
// last node in the shorter path
if len(oldParent.path) < len(newParent.path) {
i = len(oldParent.path) - 1
} else {
i = len(newParent.path) - 1
}
}
commonAncestor := oldParent.path[i].BlockPointer
oldIsCommon := oldParent.tailPointer() == commonAncestor
newIsCommon := newParent.tailPointer() == commonAncestor
newOldPath := path{FolderBranch: oldParent.FolderBranch}
var oldBps *blockPutState
if oldIsCommon {
if newIsCommon {
// if old and new are both the common ancestor, there is
// nothing to do (syncBlock will take care of everything)
} else {
// If the old one is common and the new one is
// not, then the last
// syncBlockAndCheckEmbedLocked call will need
// to access the old one.
lbc[oldParent.tailPointer()] = oldPBlock
}
} else {
if newIsCommon {
// If the new one is common, then the first
// syncBlockAndCheckEmbedLocked call will need to access
// it.
lbc[newParent.tailPointer()] = newPBlock
}
// The old one is not the common ancestor, so we need to sync it.
// TODO: optimize by pushing blocks from both paths in parallel
newOldPath, _, oldBps, err = fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, oldPBlock, *oldParent.parentPath(), oldParent.tailName(),
Dir, true, true, commonAncestor, lbc)
if err != nil {
return err
}
}
newNewPath, _, newBps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newPBlock, *newParent.parentPath(), newParent.tailName(),
Dir, true, true, zeroPtr, lbc)
if err != nil {
return err
}
// newOldPath is really just a prefix now. A copy is necessary as an
// append could cause the new path to contain nodes from the old path.
newOldPath.path = append(make([]pathNode, i+1, i+1), newOldPath.path...)
copy(newOldPath.path[:i+1], newNewPath.path[:i+1])
// merge and finalize the blockPutStates
if oldBps != nil {
newBps.mergeOtherBps(oldBps)
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(
md.ReadOnly(), newBps, blockDeleteOnMDFail)
}
}()
_, err = doBlockPuts(ctx, fbo.config.BlockServer(), fbo.config.BlockCache(),
fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *newBps)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, newBps, NoExcl)
}
func (fbo *folderBranchOps) Rename(
ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) (err error) {
fbo.log.CDebugf(ctx, "Rename %s/%s -> %s/%s", getNodeIDStr(oldParent),
oldName, getNodeIDStr(newParent), newName)
defer func() {
fbo.deferLog.CDebugf(ctx, "Rename %s/%s -> %s/%s done: %+v",
getNodeIDStr(oldParent), oldName,
getNodeIDStr(newParent), newName, err)
}()
err = fbo.checkNode(newParent)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent)
if err != nil {
return err
}
newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent)
if err != nil {
return err
}
// only works for paths within the same topdir
if oldParentPath.FolderBranch != newParentPath.FolderBranch {
return RenameAcrossDirsError{}
}
return fbo.renameLocked(ctx, lState, oldParentPath, oldName,
newParentPath, newName)
})
}
func (fbo *folderBranchOps) Read(
ctx context.Context, file Node, dest []byte, off int64) (
n int64, err error) {
fbo.log.CDebugf(ctx, "Read %s %d %d", getNodeIDStr(file),
len(dest), off)
defer func() {
fbo.deferLog.CDebugf(ctx, "Read %s %d %d done: %+v",
getNodeIDStr(file), len(dest), off, err)
}()
err = fbo.checkNode(file)
if err != nil {
return 0, err
}
filePath, err := fbo.pathFromNodeForRead(file)
if err != nil {
return 0, err
}
{
// It seems git isn't handling EINTR from some of its read calls (likely
// fread), which causes it to get corrupted data (which leads to coredumps
// later) when a read system call on pack files gets interrupted. This
// enables delayed cancellation for Read if the file path contains `.git`.
//
		// TODO: get a patch into git, wait a sufficiently long time for
		// people to upgrade, and remove this.
		//
		// Allow turning this feature off via an env var to make life easier
		// while we try to fix git.
if _, isSet := os.LookupEnv("KBFS_DISABLE_GIT_SPECIAL_CASE"); !isSet {
for _, n := range filePath.path {
if n.Name == ".git" {
EnableDelayedCancellationWithGracePeriod(ctx, fbo.config.DelayedCancellationGracePeriod())
break
}
}
}
}
// Don't let the goroutine below write directly to the return
// variable, since if the context is canceled the goroutine might
// outlast this function call, and end up in a read/write race
// with the caller.
var bytesRead int64
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// verify we have permission to read
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
bytesRead, err = fbo.blocks.Read(
ctx, lState, md.ReadOnly(), filePath, dest, off)
return err
})
if err != nil {
return 0, err
}
return bytesRead, nil
}
func (fbo *folderBranchOps) Write(
ctx context.Context, file Node, data []byte, off int64) (err error) {
fbo.log.CDebugf(ctx, "Write %s %d %d", getNodeIDStr(file),
len(data), off)
defer func() {
fbo.deferLog.CDebugf(ctx, "Write %s %d %d done: %+v",
getNodeIDStr(file), len(data), off, err)
}()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Write(
ctx, lState, md.ReadOnly(), file, data, off)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
func (fbo *folderBranchOps) Truncate(
ctx context.Context, file Node, size uint64) (err error) {
fbo.log.CDebugf(ctx, "Truncate %s %d", getNodeIDStr(file), size)
defer func() {
fbo.deferLog.CDebugf(ctx, "Truncate %s %d done: %+v",
getNodeIDStr(file), size, err)
}()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Truncate(
ctx, lState, md.ReadOnly(), file, size)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
func (fbo *folderBranchOps) setExLocked(
ctx context.Context, lState *lockState, file path,
ex bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md.ReadOnly(), file)
if err != nil {
return err
}
// If the file is a symlink, do nothing (to match ext4
// behavior).
if de.Type == Sym || de.Type == Dir {
fbo.log.CDebugf(ctx, "Ignoring setex on type %s", de.Type)
return nil
}
if ex && (de.Type == File) {
de.Type = Exec
} else if !ex && (de.Type == Exec) {
de.Type = File
} else {
// Treating this as a no-op, without updating the ctime, is a
// POSIX violation, but it's an important optimization to keep
// permissions-preserving rsyncs fast.
fbo.log.CDebugf(ctx, "Ignoring no-op setex")
return nil
}
de.Ctime = fbo.nowUnixNano()
parentPath := file.parentPath()
sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(),
exAttr, file.tailPointer())
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this setex.
if md.data.Dir.BlockPointer.ID != file.path[0].BlockPointer.ID {
fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v",
file.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
md.AddOp(sao)
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr, NoExcl)
return err
}
func (fbo *folderBranchOps) SetEx(
ctx context.Context, file Node, ex bool) (err error) {
fbo.log.CDebugf(ctx, "SetEx %s %t", getNodeIDStr(file), ex)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetEx %s %t done: %+v",
getNodeIDStr(file), ex, err)
}()
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setExLocked(ctx, lState, filePath, ex)
})
}
func (fbo *folderBranchOps) setMtimeLocked(
ctx context.Context, lState *lockState, file path,
mtime *time.Time) error {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md.ReadOnly(), file)
if err != nil {
return err
}
de.Mtime = mtime.UnixNano()
// setting the mtime counts as changing the file MD, so must set ctime too
de.Ctime = fbo.nowUnixNano()
parentPath := file.parentPath()
sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(),
mtimeAttr, file.tailPointer())
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this
// setmtime.
if md.data.Dir.BlockPointer.ID != file.path[0].BlockPointer.ID {
fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v",
file.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
md.AddOp(sao)
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr, NoExcl)
return err
}
func (fbo *folderBranchOps) SetMtime(
ctx context.Context, file Node, mtime *time.Time) (err error) {
fbo.log.CDebugf(ctx, "SetMtime %s %v", getNodeIDStr(file), mtime)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetMtime %s %v done: %+v",
getNodeIDStr(file), mtime, err)
}()
if mtime == nil {
// Can happen on some OSes (e.g. OSX) when trying to set the atime only
return nil
}
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setMtimeLocked(ctx, lState, filePath, mtime)
})
}
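// syncLocked flushes any dirty state for the given file to the
// servers in a new MD write, returning whether the file is still
// dirty afterward. mdWriterLock must be held.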
func (fbo *folderBranchOps) syncLocked(ctx context.Context,
lState *lockState, file path) (stillDirty bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
// if the cache for this file isn't dirty, we're done
if !fbo.blocks.IsDirty(lState, file) {
return false, nil
}
// Verify we have permission to write. We do this after the dirty
// check because otherwise readers who sync clean files on close
// would get an error.
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return true, err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this sync.
if md.data.Dir.BlockPointer.ID != file.path[0].BlockPointer.ID {
fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v",
file.tailPointer())
// Removing the cached info here is a little sketchy,
// since there's no guarantee that this sync comes
// from closing the file, and we still want to serve
// stat calls accurately if the user still has an open
// handle to this file.
//
// Note in particular that if a file just had a dirty
// directory entry cached (due to an attribute change on a
// removed file, for example), this will clear that attribute
// change. If there's still an open file handle, the user
// won't be able to see the change anymore.
//
// TODO: Hook this in with the node cache GC logic to be
// perfectly accurate (but at the same time, we'd then have to
// fix up the intentional panic in the background flusher to
// be more tolerant of long-lived dirty, removed files).
return true, fbo.blocks.ClearCacheInfo(lState, file)
}
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return true, err
}
// notify the daemon that a write is being performed
fbo.config.Reporter().Notify(ctx, writeNotification(file, false))
defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true))
// Filled in by doBlockPuts below.
var blocksToRemove []BlockPointer
fblock, bps, lbc, syncState, err :=
fbo.blocks.StartSync(ctx, lState, md, uid, file)
defer func() {
fbo.blocks.CleanupSyncState(
ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err)
}()
if err != nil {
return true, err
}
newPath, _, newBps, err :=
fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, fblock, *file.parentPath(),
file.tailName(), File, true, true, zeroPtr, lbc)
if err != nil {
return true, err
}
bps.mergeOtherBps(newBps)
// Note: We explicitly don't call fbo.fbm.cleanUpBlockState here
// when there's an error, because it's possible some of the blocks
// will be reused in a future attempt at this same sync, and we
// don't want them cleaned up in that case. Instead, the
// FinishSync call below will take care of that.
blocksToRemove, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return true, err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl)
if err != nil {
return true, err
}
// At this point, all reads through the old path (i.e., file)
// see writes that happened since StartSync, whereas all reads
// through the new path (newPath) don't.
//
// TODO: This isn't completely correct, since reads that
// happen after a write should always see the new data.
//
// After FinishSync succeeds, then reads through both the old
// and the new paths will see the writes that happened during
// the sync.
return fbo.blocks.FinishSync(ctx, lState, file, newPath,
md.ReadOnly(), syncState, fbo.fbm)
}
func (fbo *folderBranchOps) Sync(ctx context.Context, file Node) (err error) {
fbo.log.CDebugf(ctx, "Sync %s", getNodeIDStr(file))
defer func() {
fbo.deferLog.CDebugf(ctx, "Sync %s done: %+v",
getNodeIDStr(file), err)
}()
err = fbo.checkNode(file)
if err != nil {
return
}
var stillDirty bool
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
stillDirty, err = fbo.syncLocked(ctx, lState, filePath)
return err
})
if err != nil {
return err
}
if !stillDirty {
fbo.status.rmDirtyNode(file)
}
return nil
}
func (fbo *folderBranchOps) FolderStatus(
ctx context.Context, folderBranch FolderBranch) (
fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) {
fbo.log.CDebugf(ctx, "Status")
defer func() { fbo.deferLog.CDebugf(ctx, "Status done: %+v", err) }()
if folderBranch != fbo.folderBranch {
return FolderBranchStatus{}, nil,
WrongOpsError{fbo.folderBranch, folderBranch}
}
return fbo.status.getStatus(ctx, &fbo.blocks)
}
func (fbo *folderBranchOps) Status(
ctx context.Context) (
fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) {
return KBFSStatus{}, nil, InvalidOpError{}
}
// RegisterForChanges registers a single Observer to receive
// notifications about this folder/branch.
func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error {
// It's the caller's responsibility to make sure
// RegisterForChanges isn't called twice for the same Observer
fbo.observers.add(obs)
return nil
}
// UnregisterFromChanges stops an Observer from getting notifications
// about the folder/branch.
func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error {
fbo.observers.remove(obs)
return nil
}
// notifyBatchLocked sends out a notification for the most recent op
// in md.
func (fbo *folderBranchOps) notifyBatchLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) {
fbo.headLock.AssertLocked(lState)
lastOp := md.data.Changes.Ops[len(md.data.Changes.Ops)-1]
fbo.notifyOneOpLocked(ctx, lState, lastOp, md, false)
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md})
}
// searchForNode tries to figure out the path to the given
// blockPointer, using only the block updates that happened as part of
// a given MD update operation.
func (fbo *folderBranchOps) searchForNode(ctx context.Context,
ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) {
// Record which pointers are new to this update, and thus worth
// searching.
newPtrs := make(map[BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, update := range op.allUpdates() {
newPtrs[update.Ref] = true
}
for _, ref := range op.Refs() {
newPtrs[ref] = true
}
}
nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache,
[]BlockPointer{ptr}, newPtrs, md, md.data.Dir.BlockPointer)
if err != nil {
return nil, err
}
n, ok := nodeMap[ptr]
if !ok {
return nil, NodeNotFoundError{ptr}
}
return n, nil
}
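// unlinkFromCache unlinks the named child of node from the node
// cache, trying each of op's unref'd pointers since we don't know
// which one the entry was under.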
func (fbo *folderBranchOps) unlinkFromCache(op op, oldDir BlockPointer,
node Node, name string) error {
// The entry could be under any one of the unref'd blocks, and
// it's safe to perform this when the pointer isn't real, so just
// try them all to avoid the overhead of looking up the right
// pointer in the old version of the block.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return err
}
childPath := p.ChildPathNoPtr(name)
// revert the parent pointer
childPath.path[len(childPath.path)-2].BlockPointer = oldDir
for _, ptr := range op.Unrefs() {
// It's ok to modify this path, since we break as soon as the
// node cache takes a reference to it.
childPath.path[len(childPath.path)-1].BlockPointer = ptr
found := fbo.nodeCache.Unlink(ptr.Ref(), childPath)
if found {
break
}
}
return nil
}
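// notifyOneOpLocked updates local caches for the given op and
// notifies all registered observers of the resulting node changes.
// headLock must be held.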
func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
lState *lockState, op op, md ImmutableRootMetadata, shouldPrefetch bool) {
fbo.headLock.AssertLocked(lState)
fbo.blocks.UpdatePointers(md, lState, op, shouldPrefetch)
var changes []NodeChange
switch realOp := op.(type) {
default:
return
case *createOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %s",
realOp.NewName, getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.NewName},
})
case *rmOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %s",
realOp.OldName, getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.OldName},
})
// If this node exists, then the child node might exist too,
// and we need to unlink it in the node cache.
err := fbo.unlinkFromCache(op, realOp.Dir.Unref, node, realOp.OldName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
return
}
case *renameOp:
oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.Ref())
if oldNode != nil {
changes = append(changes, NodeChange{
Node: oldNode,
DirUpdated: []string{realOp.OldName},
})
}
var newNode Node
if realOp.NewDir.Ref != zeroPtr {
newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.Ref())
if newNode != nil {
changes = append(changes, NodeChange{
Node: newNode,
DirUpdated: []string{realOp.NewName},
})
}
} else {
newNode = oldNode
if oldNode != nil {
// Add another name to the existing NodeChange.
changes[len(changes)-1].DirUpdated =
append(changes[len(changes)-1].DirUpdated, realOp.NewName)
}
}
if oldNode != nil {
fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%s to %s/%s",
realOp.Renamed, realOp.OldName, getNodeIDStr(oldNode), realOp.NewName,
getNodeIDStr(newNode))
if newNode == nil {
if childNode :=
fbo.nodeCache.Get(realOp.Renamed.Ref()); childNode != nil {
// if the childNode exists, we still have to update
// its path to go through the new node. That means
// creating nodes for all the intervening paths.
// Unfortunately we don't have enough information to
// know what the newPath is; we have to guess it from
// the updates.
var err error
newNode, err =
fbo.searchForNode(ctx, realOp.NewDir.Ref, md.ReadOnly())
if newNode == nil {
fbo.log.CErrorf(ctx, "Couldn't find the new node: %v",
err)
}
}
}
if newNode != nil {
// If new node exists as well, unlink any previously
// existing entry and move the node.
var unrefPtr BlockPointer
if oldNode != newNode {
unrefPtr = realOp.NewDir.Unref
} else {
unrefPtr = realOp.OldDir.Unref
}
err := fbo.unlinkFromCache(op, unrefPtr, newNode, realOp.NewName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
return
}
err = fbo.nodeCache.Move(realOp.Renamed.Ref(), newNode, realOp.NewName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't move node in cache: %v", err)
return
}
}
}
case *syncOp:
node := fbo.nodeCache.Get(realOp.File.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %s",
len(realOp.Writes), getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
FileUpdated: realOp.Writes,
})
case *setAttrOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %s",
realOp.Attr, realOp.Name, getNodeIDStr(node))
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return
}
childNode, err := fbo.blocks.UpdateCachedEntryAttributes(
ctx, lState, md.ReadOnly(), p, realOp)
if err != nil {
// TODO: Log error?
return
}
if childNode == nil {
return
}
changes = append(changes, NodeChange{
Node: childNode,
})
case *GCOp:
// Unreferenced blocks in a GCOp mean that we shouldn't cache
// them anymore
bcache := fbo.config.BlockCache()
for _, ptr := range realOp.Unrefs() {
if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil {
fbo.log.CDebugf(ctx,
"Couldn't delete transient entry for %v: %v", ptr, err)
}
}
case *resolutionOp:
// If there are any unrefs of blocks that have a node, this is an
// implied rmOp (see KBFS-1424).
reverseUpdates := make(map[BlockPointer]BlockPointer)
for _, unref := range op.Unrefs() {
// TODO: I will add logic here to unlink and invalidate any
// corresponding unref'd nodes.
node := fbo.nodeCache.Get(unref.Ref())
if node == nil {
// TODO: even if we don't have the node that was
// unreferenced, we might have its parent, and that
// parent might need an invalidation.
continue
}
// If there is a node, unlink and invalidate.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get path: %v", err)
continue
}
if !p.hasValidParent() {
fbo.log.CErrorf(ctx, "Removed node %s has no parent", p)
continue
}
parentPath := p.parentPath()
parentNode := fbo.nodeCache.Get(parentPath.tailPointer().Ref())
if parentNode != nil {
changes = append(changes, NodeChange{
Node: parentNode,
DirUpdated: []string{p.tailName()},
})
}
fbo.log.CDebugf(ctx, "resolutionOp: remove %s, node %s",
p.tailPointer(), getNodeIDStr(node))
// Revert the path back to the original BlockPointers,
// before the updates were applied.
if len(reverseUpdates) == 0 {
for _, update := range op.allUpdates() {
reverseUpdates[update.Ref] = update.Unref
}
}
for i, pNode := range p.path {
if oldPtr, ok := reverseUpdates[pNode.BlockPointer]; ok {
p.path[i].BlockPointer = oldPtr
}
}
fbo.nodeCache.Unlink(p.tailPointer().Ref(), p)
}
if len(changes) == 0 {
return
}
}
fbo.observers.batchChanges(ctx, changes)
}
func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) MetadataRevision {
fbo.headLock.AssertAnyLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return fbo.head.Revision()
}
return MetadataRevisionUninitialized
}
func (fbo *folderBranchOps) getCurrMDRevision(
lState *lockState) MetadataRevision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.getCurrMDRevisionLocked(lState)
}
type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error
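// applyMDUpdatesLocked applies a list of merged MD updates to the
// local head in order, notifying observers of each op; it returns
// UnmergedError if this folder is currently staged. mdWriterLock
// must be held.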
func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
// If there's anything in the journal, don't apply these MDs.
// Wait for CR to happen.
if fbo.isMasterBranchLocked(lState) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return err
}
if mergedRev != MetadataRevisionUninitialized {
if len(rmds) > 0 {
// We should update our view of the merged master though,
// to avoid re-registering for the same updates again.
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(
ctx, lState, rmds[len(rmds)-1].Revision(), false)
}()
}
fbo.log.CDebugf(ctx,
"Ignoring fetched revisions while MDs are in journal")
return nil
}
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// if we have staged changes, ignore all updates until conflict
// resolution kicks in. TODO: cache these for future use.
if !fbo.isMasterBranchLocked(lState) {
if len(rmds) > 0 {
latestMerged := rmds[len(rmds)-1]
// If we're running a journal, don't trust our own updates
// here because they might have come from our own journal
// before the conflict was detected. Assume we'll hear
// about the conflict via callbacks from the journal.
if TLFJournalEnabled(fbo.config, fbo.id()) {
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
if key == latestMerged.LastModifyingWriterVerifyingKey() {
return UnmergedError{}
}
}
// setHeadLocked takes care of merged case
fbo.setLatestMergedRevisionLocked(
ctx, lState, latestMerged.Revision(), false)
unmergedRev := MetadataRevisionUninitialized
if fbo.head != (ImmutableRootMetadata{}) {
unmergedRev = fbo.head.Revision()
}
fbo.cr.Resolve(unmergedRev, latestMerged.Revision())
}
return UnmergedError{}
}
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return errors.New("Ignoring MD updates while writes are dirty")
}
appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds))
for _, rmd := range rmds {
// check that we're applying the expected MD revision
if rmd.Revision() <= fbo.getCurrMDRevisionLocked(lState) {
// Already caught up!
continue
}
if err := isReadableOrError(ctx, fbo.config.KBPKI(), rmd.ReadOnly()); err != nil {
return err
}
err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false)
if err != nil {
return err
}
// No new operations in these.
if rmd.IsWriterMetadataCopiedSet() {
continue
}
for _, op := range rmd.data.Changes.Ops {
fbo.notifyOneOpLocked(ctx, lState, op, rmd, true)
}
appliedRevs = append(appliedRevs, rmd)
}
if len(appliedRevs) > 0 {
fbo.editHistory.UpdateHistory(ctx, appliedRevs)
}
return nil
}
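// undoMDUpdatesLocked walks the given revisions backwards, rewinding
// the head and inverting each op so local observers see the updates
// being rolled back.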
func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// go backwards through the updates
for i := len(rmds) - 1; i >= 0; i-- {
rmd := rmds[i]
// on undo, it's ok to re-apply the current revision since you
// need to invert all of its ops.
//
// This duplicates a check in
// fbo.setHeadPredecessorLocked. TODO: Remove this
// duplication.
if rmd.Revision() != fbo.getCurrMDRevisionLocked(lState) &&
rmd.Revision() != fbo.getCurrMDRevisionLocked(lState)-1 {
return MDUpdateInvertError{rmd.Revision(),
fbo.getCurrMDRevisionLocked(lState)}
}
// TODO: Check that the revisions are equal only for
// the first iteration.
if rmd.Revision() < fbo.getCurrMDRevisionLocked(lState) {
err := fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
}
// iterate the ops in reverse and invert each one
ops := rmd.data.Changes.Ops
for j := len(ops) - 1; j >= 0; j-- {
io, err := invertOpForLocalNotifications(ops[j])
if err != nil {
fbo.log.CWarningf(ctx,
"got error %v when invert op %v; "+
"skipping. Open file handles "+
"may now be in an invalid "+
"state, which can be fixed by "+
"either closing them all or "+
"restarting KBFS.",
err, ops[j])
continue
}
fbo.notifyOneOpLocked(ctx, lState, io, rmd, false)
}
}
// TODO: update the edit history?
return nil
}
func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.applyMDUpdatesLocked(ctx, lState, rmds)
}
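// getLatestMergedRevision returns the latest merged revision this FBO
// has seen, taking headLock for the read.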
func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) MetadataRevision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.latestMergedRevision
}
// caller should have held fbo.headLock
func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context, lState *lockState, rev MetadataRevision, allowBackward bool) {
fbo.headLock.AssertLocked(lState)
if rev == MetadataRevisionUninitialized {
panic("Cannot set latest merged revision to an uninitialized value")
}
if fbo.latestMergedRevision < rev || allowBackward {
fbo.latestMergedRevision = rev
fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev)
} else {
fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+
"the new revision (%d); won't update.", fbo.latestMergedRevision, rev)
}
}
// Assumes all necessary locking is either already done by caller, or
// is done by applyFunc.
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
lState *lockState, applyFunc applyMDUpdatesFunc) error {
// first look up all MD revisions newer than my current head
start := fbo.getLatestMergedRevision(lState) + 1
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), start)
if err != nil {
return err
}
err = applyFunc(ctx, lState, rmds)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context,
lState *lockState) error {
fbo.log.CDebugf(ctx, "Fetching the newest unmerged head")
bid := func() BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
// We can only ever be at most one revision behind, so fetch the
// latest unmerged revision and apply it as a successor.
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid)
if err != nil {
return err
}
if md == (ImmutableRootMetadata{}) {
// There is no unmerged revision, oops!
return errors.New("Couldn't find an unmerged head")
}
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if fbo.bid != bid {
// The branches switched (apparently CR completed), so just
// try again.
fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head")
return nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil {
return err
}
fbo.notifyBatchLocked(ctx, lState, md)
if err := fbo.config.MDCache().Put(md); err != nil {
return err
}
return nil
}
// getUnmergedMDUpdates returns a slice of the unmerged MDs for this
// TLF's current unmerged branch, between the
// merge point for the branch and the current head. The returned MDs
// are the same instances that are stored in the MD cache, so they
// should be modified with care.
func (fbo *folderBranchOps) getUnmergedMDUpdates(
ctx context.Context, lState *lockState) (
MetadataRevision, []ImmutableRootMetadata, error) {
// acquire mdWriterLock to read the current branch ID.
bid := func() BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
bid, fbo.getCurrMDRevision(lState))
}
func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) (
MetadataRevision, []ImmutableRootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
fbo.bid, fbo.getCurrMDRevision(lState))
}
// Returns a list of block pointers that were created during the
// staged era.
func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) ([]BlockPointer, error) {
fbo.mdWriterLock.AssertLocked(lState)
currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return nil, err
}
err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds)
if err != nil {
return nil, err
}
// We have arrived at the branch point. The new root is
// the previous revision from the current head. Find it
// and apply. TODO: somehow fake the current head into
// being currHead-1, so that future calls to
// applyMDUpdates will fetch this along with the rest of
// the updates.
fbo.setBranchIDLocked(lState, NullBranchID)
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
currHead, Merged)
if err != nil {
return nil, err
}
err = func() error {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
fbo.setLatestMergedRevisionLocked(ctx, lState, rmd.Revision(), true)
return nil
}()
if err != nil {
return nil, err
}
// Return all new refs
var unmergedPtrs []BlockPointer
for _, rmd := range unmergedRmds {
for _, op := range rmd.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr != zeroPtr {
unmergedPtrs = append(unmergedPtrs, ptr)
}
}
for _, update := range op.allUpdates() {
if update.Ref != zeroPtr {
unmergedPtrs = append(unmergedPtrs, update.Ref)
}
}
}
}
return unmergedPtrs, nil
}
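// unstageLocked undoes all unmerged updates, prunes the branch on the
// server, catches back up with the merged branch, and records the
// newly-unref'd pointers in a resolutionOp.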
func (fbo *folderBranchOps) unstageLocked(ctx context.Context,
lState *lockState) error {
fbo.mdWriterLock.AssertLocked(lState)
// fetch all of my unstaged updates, and undo them one at a time
bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState)
unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return err
}
	// let the server know we no longer need this branch
if !wasMasterBranch {
err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid)
if err != nil {
return err
}
}
// now go forward in time, if possible
err = fbo.getAndApplyMDUpdates(ctx, lState,
fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
// Finally, create a resolutionOp with the newly-unref'd pointers.
resOp := newResolutionOp()
for _, ptr := range unmergedPtrs {
resOp.AddUnrefBlock(ptr)
}
md.AddOp(resOp)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl)
}
// TODO: remove once we have automatic conflict resolution
func (fbo *folderBranchOps) UnstageForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "UnstageForTesting")
defer func() {
fbo.deferLog.CDebugf(ctx, "UnstageForTesting done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
if fbo.isMasterBranch(lState) {
// no-op
return nil
}
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// launch unstaging in a new goroutine, because we don't want to
// use the provided context because upper layers might ignore our
// notifications if we do. But we still want to wait for the
// context to cancel.
c := make(chan error, 1)
freshCtx, cancel := fbo.newCtxWithFBOID()
defer cancel()
fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting")
go func() {
lState := makeFBOLockState()
c <- fbo.doMDWriteWithRetry(ctx, lState,
func(lState *lockState) error {
return fbo.unstageLocked(freshCtx, lState)
})
}()
select {
case err := <-c:
return err
case <-ctx.Done():
return ctx.Err()
}
})
}
// mdWriterLock must be taken by the caller.
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context,
lState *lockState, promptPaper bool) (err error) {
fbo.log.CDebugf(ctx, "rekeyLocked")
defer func() {
fbo.deferLog.CDebugf(ctx, "rekeyLocked done: %+v", err)
}()
fbo.mdWriterLock.AssertLocked(lState)
if !fbo.isMasterBranchLocked(lState) {
return errors.New("can't rekey while staged")
}
head := fbo.getHead(lState)
if head != (ImmutableRootMetadata{}) {
// If we already have a cached revision, make sure we're
// up-to-date with the latest revision before inspecting the
// metadata, since Rekey doesn't let us go into CR mode, and
// we don't actually get folder update notifications when the
// rekey bit is set, just a "folder needs rekey" update.
if err := fbo.getAndApplyMDUpdates(
ctx, lState, fbo.applyMDUpdatesLocked); err != nil {
if applyErr, ok := err.(MDRevisionMismatch); !ok ||
applyErr.rev != applyErr.curr {
return err
}
}
}
md, lastWriterVerifyingKey, rekeyWasSet, err :=
fbo.getMDForRekeyWriteLocked(ctx, lState)
if err != nil {
return err
}
if fbo.rekeyWithPromptTimer != nil {
if !promptPaper {
fbo.log.CDebugf(ctx, "rekeyWithPrompt superseded before it fires.")
} else if !md.IsRekeySet() {
fbo.rekeyWithPromptTimer.Stop()
fbo.rekeyWithPromptTimer = nil
// If the rekey bit isn't set, then some other device
// already took care of our request, and we can stop
// early. Note that if this FBO never registered for
// updates, then we might not yet have seen the update, in
// which case we'll still try to rekey but it will fail as
// a conflict.
fbo.log.CDebugf(ctx, "rekeyWithPrompt not needed because the "+
"rekey bit was already unset.")
return nil
}
}
currKeyGen := md.LatestKeyGeneration()
rekeyDone, tlfCryptKey, err := fbo.config.KeyManager().
Rekey(ctx, md, promptPaper)
stillNeedsRekey := false
switch err.(type) {
case nil:
// TODO: implement a "forced" option that rekeys even when the
// devices haven't changed?
if !rekeyDone {
fbo.log.CDebugf(ctx, "No rekey necessary")
return nil
}
// Clear the rekey bit if any.
md.clearRekeyBit()
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return err
}
// Readers can't clear the last revision, because:
// 1) They don't have access to the writer metadata, so can't clear the
// block changes.
// 2) Readers need the MetadataFlagWriterMetadataCopied bit set for
// MDServer to authorize the write.
// Without this check, MDServer returns an Unauthorized error.
if md.GetTlfHandle().IsWriter(uid) {
md.clearLastRevision()
}
case RekeyIncompleteError:
if !rekeyDone && rekeyWasSet {
// The rekey bit was already set, and there's nothing else
			// we can do, so don't put any new revisions.
fbo.log.CDebugf(ctx, "No further rekey possible by this user.")
return nil
}
// Rekey incomplete, fallthrough without early exit, to ensure
// we write the metadata with any potential changes
fbo.log.CDebugf(ctx,
"Rekeyed reader devices, but still need writer rekey")
case NeedOtherRekeyError:
stillNeedsRekey = true
case NeedSelfRekeyError:
stillNeedsRekey = true
default:
if err == context.DeadlineExceeded {
fbo.log.CDebugf(ctx, "Paper key prompt timed out")
// Reschedule the prompt in the timeout case.
stillNeedsRekey = true
} else {
return err
}
}
if stillNeedsRekey {
fbo.log.CDebugf(ctx, "Device doesn't have access to rekey")
// If we didn't have read access, then we don't have any
// unlocked paper keys. Wait for some time, and then if we
// still aren't rekeyed, try again but this time prompt the
// user for any known paper keys. We do this even if the
// rekey bit is already set, since we may have restarted since
// the previous rekey attempt, before prompting for the paper
// key. Only schedule this as a one-time event, since direct
// folder accesses from the user will also cause a
// rekeyWithPrompt.
//
// Only ever set the timer once.
if fbo.rekeyWithPromptTimer == nil {
d := fbo.config.RekeyWithPromptWaitTime()
fbo.log.CDebugf(ctx, "Scheduling a rekeyWithPrompt in %s", d)
fbo.rekeyWithPromptTimer = time.AfterFunc(d, fbo.rekeyWithPrompt)
}
if rekeyWasSet {
// Devices not yet keyed shouldn't set the rekey bit again
fbo.log.CDebugf(ctx, "Rekey bit already set")
return nil
}
// This device hasn't been keyed yet, fall through to set the rekey bit
}
// add an empty operation to satisfy assumptions elsewhere
md.AddOp(newRekeyOp())
// we still let readers push a new md block that we validate against reader
// permissions
err = fbo.finalizeMDRekeyWriteLocked(
ctx, lState, md, lastWriterVerifyingKey)
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
keyGen := md.LatestKeyGeneration()
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
// send rekey finish notification
handle := md.GetTlfHandle()
if currKeyGen >= FirstValidKeyGen {
fbo.config.Reporter().Notify(ctx,
rekeyNotification(ctx, fbo.config, handle, true))
}
if !stillNeedsRekey && fbo.rekeyWithPromptTimer != nil {
fbo.log.CDebugf(ctx, "Scheduled rekey timer no longer needed")
fbo.rekeyWithPromptTimer.Stop()
fbo.rekeyWithPromptTimer = nil
}
return nil
}
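// rekeyWithPrompt retries a rekey, this time prompting the user for
// any known paper keys, giving up after the configured wait time.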
func (fbo *folderBranchOps) rekeyWithPrompt() {
var err error
ctx := ctxWithRandomIDReplayable(
context.Background(), CtxRekeyIDKey, CtxRekeyOpID, fbo.log)
// Only give the user limited time to enter their paper key, so we
// don't wait around forever.
d := fbo.config.RekeyWithPromptWaitTime()
ctx, cancel := context.WithTimeout(ctx, d)
defer cancel()
if ctx, err = NewContextWithCancellationDelayer(ctx); err != nil {
panic(err)
}
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.rekeyLocked(ctx, lState, true)
})
}
// Rekey rekeys the given folder.
func (fbo *folderBranchOps) Rekey(ctx context.Context, tlf tlf.ID) (err error) {
fb := FolderBranch{tlf, MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.rekeyLocked(ctx, lState, false)
})
}
func (fbo *folderBranchOps) SyncFromServerForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "SyncFromServerForTesting")
defer func() {
fbo.deferLog.CDebugf(ctx,
"SyncFromServerForTesting done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
// A journal flush before CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
// Loop until we're fully updated on the master branch.
for {
if !fbo.isMasterBranch(lState) {
if err := fbo.cr.Wait(ctx); err != nil {
return err
}
// If we are still staged after the wait, then we have a problem.
if !fbo.isMasterBranch(lState) {
return errors.Errorf("Conflict resolution didn't take us out " +
"of staging.")
}
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
if len(dirtyRefs) > 0 {
for _, ref := range dirtyRefs {
fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref)
}
return errors.New("can't sync from server while dirty")
}
// A journal flush after CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
if err := fbo.getAndApplyMDUpdates(
ctx, lState, fbo.applyMDUpdates); err != nil {
if applyErr, ok := err.(MDRevisionMismatch); ok {
if applyErr.rev == applyErr.curr {
fbo.log.CDebugf(ctx, "Already up-to-date with server")
return nil
}
}
if _, isUnmerged := err.(UnmergedError); isUnmerged {
continue
}
return err
}
break
}
// Wait for all the asynchronous block archiving and quota
// reclamation to hit the block server.
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
return err
}
if err := fbo.editHistory.Wait(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForQuotaReclamations(ctx); err != nil {
return err
}
// A second journal flush if needed, to clear out any
// archive/remove calls caused by the above operations.
return WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log)
}
// CtxFBOTagKey is the type used for unique context tags within folderBranchOps
type CtxFBOTagKey int
const (
// CtxFBOIDKey is the type of the tag for unique operation IDs
// within folderBranchOps.
CtxFBOIDKey CtxFBOTagKey = iota
)
// CtxFBOOpID is the display name for the unique operation
// folderBranchOps ID tag.
const CtxFBOOpID = "FBOID"
func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context {
return ctxWithRandomIDReplayable(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log)
}
func (fbo *folderBranchOps) newCtxWithFBOID() (context.Context, context.CancelFunc) {
// No need to call NewContextReplayable since ctxWithFBOID calls
// ctxWithRandomIDReplayable, which attaches replayably.
ctx := fbo.ctxWithFBOID(context.Background())
ctx, cancelFunc := context.WithCancel(ctx)
ctx, err := NewContextWithCancellationDelayer(ctx)
if err != nil {
panic(err)
}
return ctx, cancelFunc
}
// Run the passed function with a context that's canceled on shutdown.
func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
errChan := make(chan error, 1)
go func() {
errChan <- fn(ctx)
}()
select {
case err := <-errChan:
return err
case <-fbo.shutdownChan:
return ShutdownHappenedError{}
}
}
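// maybeFastForward jumps the local head straight to the server's
// current revision, skipping the intermediate updates, when we've been
// out of date long enough and are far enough behind that fetching each
// individual revision isn't worthwhile.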
func (fbo *folderBranchOps) maybeFastForward(ctx context.Context,
lState *lockState, lastUpdate time.Time, currUpdate time.Time) (
fastForwardDone bool, err error) {
// Has it been long enough to try fast-forwarding?
if currUpdate.Before(lastUpdate.Add(fastForwardTimeThresh)) ||
!fbo.isMasterBranch(lState) {
return false, nil
}
fbo.log.CDebugf(ctx, "Checking head for possible "+
"fast-forwarding (last update time=%s)", lastUpdate)
currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id())
if err != nil {
return false, err
}
fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// If the journal has anything in it, don't fast-forward since we
// haven't finished flushing yet. If there was really a remote
// update on the server, we'll end up in CR eventually.
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return false, err
}
if mergedRev != MetadataRevisionUninitialized {
return false, nil
}
if !fbo.isMasterBranchLocked(lState) {
// Don't update if we're staged.
return false, nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if currHead.Revision() < fbo.latestMergedRevision+fastForwardRevThresh {
// Might as well fetch all the revisions.
return false, nil
}
fbo.log.CDebugf(ctx, "Fast-forwarding from rev %d to rev %d",
fbo.latestMergedRevision, currHead.Revision())
changes, err := fbo.blocks.FastForwardAllNodes(
ctx, lState, currHead.ReadOnly())
if err != nil {
return false, err
}
err = fbo.setHeadSuccessorLocked(ctx, lState, currHead, true /*rebase*/)
if err != nil {
return false, err
}
// Invalidate all the affected nodes.
if len(changes) > 0 {
fbo.observers.batchChanges(ctx, changes)
}
// Reset the edit history. TODO: notify any listeners that we've
// done this.
fbo.editHistory.Shutdown()
fbo.editHistory = NewTlfEditHistory(fbo.config, fbo, fbo.log)
return true, nil
}
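// locallyFinalizeTLF fakes a finalized head for this TLF so the old
// folder name is moved aside locally, without requiring a valid
// signing key.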
func (fbo *folderBranchOps) locallyFinalizeTLF(ctx context.Context) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return
}
// It's safe to give this a finalized number of 1 and a fake user
// name. The whole point here is to move the old finalized TLF
// name away to a new name, where the user won't be able to access
// it anymore, and if there's a conflict with a previously-moved
// TLF that shouldn't matter.
now := fbo.config.Clock().Now()
finalizedInfo, err := tlf.NewHandleExtension(
tlf.HandleExtensionFinalized, 1, libkb.NormalizedUsername("<unknown>"),
now)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't make finalized info: %+v", err)
return
}
fakeSignedHead := &RootMetadataSigned{MD: fbo.head.bareMd}
finalRmd, err := fakeSignedHead.MakeFinalCopy(
fbo.config.Codec(), now, finalizedInfo)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't finalize MD: %+v", err)
return
}
// Construct the data needed to fake a new head.
mdID, err := fbo.config.Crypto().MakeMdID(finalRmd.MD)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized MD ID: %+v", err)
return
}
bareHandle, err := finalRmd.MD.MakeBareTlfHandle(fbo.head.Extra())
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized bare handle: %+v", err)
return
}
handle, err := MakeTlfHandle(ctx, bareHandle, fbo.config.KBPKI())
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized handle: %+v", err)
return
}
finalBrmd, ok := finalRmd.MD.(MutableBareRootMetadata)
if !ok {
fbo.log.CErrorf(ctx, "Couldn't get finalized mutable bare MD: %+v", err)
return
}
// We don't have a way to sign this with a valid key (and we might
// be logged out anyway), so just directly make the md immutable.
finalIrmd := ImmutableRootMetadata{
ReadOnlyRootMetadata: makeRootMetadata(
finalBrmd, fbo.head.Extra(), handle).ReadOnly(),
mdID: mdID,
}
// This will trigger the handle change notification to observers.
err = fbo.setHeadSuccessorLocked(ctx, lState, finalIrmd, false)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't set finalized MD: %+v", err)
return
}
}
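// registerAndWaitForUpdates registers for server updates and processes
// them until shutdown, retrying with exponential backoff on errors.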
func (fbo *folderBranchOps) registerAndWaitForUpdates() {
defer close(fbo.updateDoneChan)
childDone := make(chan struct{})
var lastUpdate time.Time
err := fbo.runUnlessShutdown(func(ctx context.Context) error {
defer close(childDone)
// If we fail to register for or process updates, try again
// with an exponential backoff, so we don't overwhelm the
// server or ourselves with too many attempts in a hopeless
// situation.
expBackoff := backoff.NewExponentialBackOff()
// Never give up hope until we shut down
expBackoff.MaxElapsedTime = 0
// Register and wait in a loop unless we hit an unrecoverable error
ctx, cancel := context.WithCancel(ctx)
for {
err := backoff.RetryNotifyWithContext(ctx, func() error {
// Replace the FBOID one with a fresh id for every attempt
newCtx := fbo.ctxWithFBOID(ctx)
updateChan, err := fbo.registerForUpdates(newCtx)
if err != nil {
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
return err
}
}
currUpdate, err := fbo.waitForAndProcessUpdates(
newCtx, lastUpdate, updateChan)
switch errors.Cause(err).(type) {
case UnmergedError:
// skip the back-off timer and continue directly to next
// registerForUpdates
return nil
case NewMetadataVersionError:
fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
"read the newest metadata: %+v", err)
fbo.status.setPermErr(err)
cancel()
return ctx.Err()
case MDServerErrorCannotReadFinalizedTLF:
fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
"read the finalized metadata for this TLF: %+v", err)
fbo.status.setPermErr(err)
// Locally finalize the TLF so new accesses
// through to the old folder name will find the
// new folder.
fbo.locallyFinalizeTLF(newCtx)
cancel()
return ctx.Err()
}
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
if err == nil {
lastUpdate = currUpdate
}
return err
}
},
expBackoff,
func(err error, nextTime time.Duration) {
fbo.log.CDebugf(ctx,
"Retrying registerForUpdates in %s due to err: %v",
nextTime, err)
})
if err != nil {
return err
}
}
})
if err != nil && err != context.Canceled {
fbo.log.CWarningf(context.Background(),
"registerAndWaitForUpdates failed unexpectedly with an error: %v",
err)
}
<-childDone
}
func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) (
updateChan <-chan error, err error) {
lState := makeFBOLockState()
currRev := fbo.getLatestMergedRevision(lState)
fbo.log.CDebugf(ctx, "Registering for updates (curr rev = %d)", currRev)
defer func() {
fbo.deferLog.CDebugf(ctx,
"Registering for updates (curr rev = %d) done: %+v",
currRev, err)
}()
// RegisterForUpdate will itself retry on connectivity issues
return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(), currRev)
}
func (fbo *folderBranchOps) waitForAndProcessUpdates(
ctx context.Context, lastUpdate time.Time,
updateChan <-chan error) (currUpdate time.Time, err error) {
// successful registration; now, wait for an update or a shutdown
fbo.log.CDebugf(ctx, "Waiting for updates")
defer func() {
fbo.deferLog.CDebugf(ctx, "Waiting for updates done: %+v", err)
}()
lState := makeFBOLockState()
for {
select {
case err := <-updateChan:
fbo.log.CDebugf(ctx, "Got an update: %v", err)
if err != nil {
return time.Time{}, err
}
// Getting and applying the updates requires holding
// locks, so make sure it doesn't take too long.
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
defer cancel()
currUpdate := fbo.config.Clock().Now()
ffDone, err :=
fbo.maybeFastForward(ctx, lState, lastUpdate, currUpdate)
if err != nil {
return time.Time{}, err
}
if ffDone {
return currUpdate, nil
}
err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates)
if err != nil {
fbo.log.CDebugf(ctx, "Got an error while applying "+
"updates: %v", err)
return time.Time{}, err
}
return currUpdate, nil
case unpause := <-fbo.updatePauseChan:
fbo.log.CInfof(ctx, "Updates paused")
// wait to be unpaused
select {
case <-unpause:
fbo.log.CInfof(ctx, "Updates unpaused")
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
}
}
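// backgroundFlusher periodically syncs any dirty files, either on a
// timer, on an explicit force-sync signal, or immediately when the
// dirty block cache reports that its buffer is full.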
func (fbo *folderBranchOps) backgroundFlusher(betweenFlushes time.Duration) {
ticker := time.NewTicker(betweenFlushes)
defer ticker.Stop()
lState := makeFBOLockState()
var prevDirtyRefMap map[BlockRef]bool
sameDirtyRefCount := 0
for {
doSelect := true
if fbo.blocks.GetState(lState) == dirtyState &&
fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
// We have dirty files, and the system has a full buffer,
// so don't bother waiting for a signal, just get right to
// the main attraction.
doSelect = false
}
if doSelect {
select {
case <-ticker.C:
case <-fbo.forceSyncChan:
case <-fbo.shutdownChan:
return
}
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
if len(dirtyRefs) == 0 {
sameDirtyRefCount = 0
continue
}
// Make sure we are making some progress
currDirtyRefMap := make(map[BlockRef]bool)
for _, ref := range dirtyRefs {
currDirtyRefMap[ref] = true
}
if reflect.DeepEqual(currDirtyRefMap, prevDirtyRefMap) {
sameDirtyRefCount++
} else {
sameDirtyRefCount = 0
}
if sameDirtyRefCount >= 10 {
panic(fmt.Sprintf("Making no Sync progress on dirty refs: %v",
dirtyRefs))
}
prevDirtyRefMap = currDirtyRefMap
fbo.runUnlessShutdown(func(ctx context.Context) (err error) {
// Denote that these are coming from a background
// goroutine, not directly from any user.
ctx = NewContextReplayable(ctx,
func(ctx context.Context) context.Context {
return context.WithValue(ctx, CtxBackgroundSyncKey, "1")
})
// Just in case network access or a bug gets stuck for a
// long time, time out the sync eventually.
longCtx, longCancel :=
context.WithTimeout(ctx, backgroundTaskTimeout)
defer longCancel()
// Make sure this loop doesn't starve user requests for
// too long. But use the longer-timeout version in the
// actual Sync command, to avoid unnecessary errors.
shortCtx, shortCancel := context.WithTimeout(ctx, 1*time.Second)
defer shortCancel()
for _, ref := range dirtyRefs {
select {
case <-shortCtx.Done():
fbo.log.CDebugf(ctx,
"Stopping background sync early due to timeout")
return nil
default:
}
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
err := fbo.Sync(longCtx, node)
if err != nil {
// Just log the warning and keep trying to
// sync the rest of the dirty files.
p := fbo.nodeCache.PathFromNode(node)
fbo.log.CWarningf(ctx, "Couldn't sync dirty file with "+
"ref=%v, nodeID=%s, and path=%v: %v",
ref, getNodeIDStr(node), p, err)
}
}
return nil
})
}
}
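// blockUnmergedWrites and unblockUnmergedWrites let callers (e.g.,
// conflict resolution) hold off writers by taking and releasing
// mdWriterLock.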
func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Lock(lState)
}
func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Unlock(lState)
}
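// finalizeResolutionLocked is the lock-holding body of
// finalizeResolution; see that method for details.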
func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []kbfsblock.ID) error {
fbo.mdWriterLock.AssertLocked(lState)
// Put the blocks into the cache so that, even if we fail below,
// future attempts may reuse the blocks.
err := fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
mdID, err := fbo.config.MDOps().ResolveBranch(ctx, fbo.id(), fbo.bid,
blocksToDelete, md)
doUnmergedPut := isRevisionConflict(err)
if doUnmergedPut {
fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR")
return err
}
if err != nil {
return err
}
// Queue a rekey if the bit was set.
if md.IsRekeySet() {
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
md.loadCachedBlockChanges(ctx, bps)
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// Set the head to the new MD.
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
irmd := MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now())
err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+
"successful put: %v", err)
return err
}
fbo.setBranchIDLocked(lState, NullBranchID)
// Archive the old, unref'd blocks if journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
// notifyOneOp for every fixed-up merged op.
for _, op := range newOps {
fbo.notifyOneOpLocked(ctx, lState, op, irmd, false)
}
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{irmd})
return nil
}
// finalizeResolution caches all the blocks, and writes the new MD to
// the merged branch, failing if there is a conflict. It also sends
// out the given newOps notifications locally. This is used for
// completing conflict resolution.
func (fbo *folderBranchOps) finalizeResolution(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []kbfsblock.ID) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.finalizeResolutionLocked(
ctx, lState, md, bps, newOps, blocksToDelete)
}
func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context,
lState *lockState) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure",
fbo.bid)
return fbo.unstageLocked(ctx, lState)
}
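// handleTLFBranchChange reacts to a journal branch change by switching
// to the new branch, kicking off conflict resolution, and setting the
// head to the branch's unmerged head.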
func (fbo *folderBranchOps) handleTLFBranchChange(ctx context.Context,
newBID BranchID) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.log.CDebugf(ctx, "Journal branch change: %s", newBID)
if !fbo.isMasterBranchLocked(lState) {
if fbo.bid == newBID {
fbo.log.CDebugf(ctx, "Already on branch %s", newBID)
return
}
panic(fmt.Sprintf("Cannot switch to branch %s while on branch %s",
newBID, fbo.bid))
}
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), newBID)
if err != nil {
fbo.log.CWarningf(ctx,
"No unmerged head on journal branch change (bid=%s)", newBID)
return
}
if md == (ImmutableRootMetadata{}) || md.MergedStatus() != Unmerged ||
md.BID() != newBID {
// This can happen if CR got kicked off in some other way and
// completed before we took the lock to process this
// notification.
fbo.log.CDebugf(ctx, "Ignoring stale branch change: md=%v, newBID=%d",
md, newBID)
return
}
// Everything we thought we knew about quota reclamation is now
// called into question.
fbo.fbm.clearLastQRData()
// Kick off conflict resolution and set the head to the correct branch.
fbo.setBranchIDLocked(lState, newBID)
fbo.cr.BeginNewBranch()
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, md, true /*rebased*/)
if err != nil {
fbo.log.CWarningf(ctx,
"Could not set head on journal branch change: %v", err)
return
}
}
func (fbo *folderBranchOps) onTLFBranchChange(newBID BranchID) {
fbo.branchChanges.Add(1)
go func() {
defer fbo.branchChanges.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
// This only happens on a `PruneBranch` call, in which case we
// would have already updated fbo's local view of the branch/head.
if newBID == NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring branch change back to master")
return
}
fbo.handleTLFBranchChange(ctx, newBID)
}()
}
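// handleMDFlush records the newly-flushed merged revision and, if the
// revision is archivable, queues its unref'd blocks for archiving.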
func (fbo *folderBranchOps) handleMDFlush(ctx context.Context, bid BranchID,
rev MetadataRevision) {
fbo.log.CDebugf(ctx, "Considering archiving references for flushed MD revision %d", rev)
lState := makeFBOLockState()
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState, rev, false)
}()
// Get that revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
rev, Merged)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't get revision %d for archiving: %v",
rev, err)
return
}
if err := isArchivableMDOrError(rmd.ReadOnly()); err != nil {
fbo.log.CDebugf(
ctx, "Skipping archiving references for flushed MD revision %d: %s", rev, err)
return
}
fbo.fbm.archiveUnrefBlocks(rmd.ReadOnly())
}
func (fbo *folderBranchOps) onMDFlush(bid BranchID, rev MetadataRevision) {
fbo.mdFlushes.Add(1)
go func() {
defer fbo.mdFlushes.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
if bid != NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring MD flush on branch %v for "+
"revision %d", bid, rev)
return
}
fbo.handleMDFlush(ctx, bid, rev)
}()
}
// GetUpdateHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context,
folderBranch FolderBranch) (history TLFUpdateHistory, err error) {
fbo.log.CDebugf(ctx, "GetUpdateHistory")
defer func() {
fbo.deferLog.CDebugf(ctx, "GetUpdateHistory done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch}
}
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(),
MetadataRevisionInitial)
if err != nil {
return TLFUpdateHistory{}, err
}
if len(rmds) > 0 {
rmd := rmds[len(rmds)-1]
history.ID = rmd.TlfID().String()
history.Name = rmd.GetTlfHandle().GetCanonicalPath()
}
history.Updates = make([]UpdateSummary, 0, len(rmds))
writerNames := make(map[keybase1.UID]string)
for _, rmd := range rmds {
writer, ok := writerNames[rmd.LastModifyingWriter()]
if !ok {
name, err := fbo.config.KBPKI().
GetNormalizedUsername(ctx, rmd.LastModifyingWriter())
if err != nil {
return TLFUpdateHistory{}, err
}
writer = string(name)
writerNames[rmd.LastModifyingWriter()] = writer
}
updateSummary := UpdateSummary{
Revision: rmd.Revision(),
Date: time.Unix(0, rmd.data.Dir.Mtime),
Writer: writer,
LiveBytes: rmd.DiskUsage(),
Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)),
}
for _, op := range rmd.data.Changes.Ops {
opSummary := OpSummary{
Op: op.String(),
Refs: make([]string, 0, len(op.Refs())),
Unrefs: make([]string, 0, len(op.Unrefs())),
Updates: make(map[string]string),
}
for _, ptr := range op.Refs() {
opSummary.Refs = append(opSummary.Refs, ptr.String())
}
for _, ptr := range op.Unrefs() {
opSummary.Unrefs = append(opSummary.Unrefs, ptr.String())
}
for _, update := range op.allUpdates() {
opSummary.Updates[update.Unref.String()] = update.Ref.String()
}
updateSummary.Ops = append(updateSummary.Ops, opSummary)
}
history.Updates = append(history.Updates, updateSummary)
}
return history, nil
}
// GetEditHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetEditHistory(ctx context.Context,
folderBranch FolderBranch) (edits TlfWriterEdits, err error) {
fbo.log.CDebugf(ctx, "GetEditHistory")
defer func() {
fbo.deferLog.CDebugf(ctx, "GetEditHistory done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return nil, WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
head, err := fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
if err != nil {
return nil, err
}
return fbo.editHistory.GetComplete(ctx, head)
}
// PushStatusChange forces a new status be fetched by status listeners.
func (fbo *folderBranchOps) PushStatusChange() {
fbo.config.KBFSOps().PushStatusChange()
}
// PushConnectionStatusChange pushes human readable connection status changes.
func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) {
fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus)
}
| 1 | 15,557 | Having it above kept the block info in the cached changes, which could end up confusing things quite a bit. | keybase-kbfs | go |
@@ -409,15 +409,7 @@ HELP
outputter.print_event(node, event)
end
when 'task'
- task_name = options[:object]
-
- path, metadata = load_task_data(task_name, @config[:modulepath])
- input_method = metadata['input_method']
-
- input_method ||= 'both'
- executor.run_task(
- nodes, path, input_method, options[:task_options]
- ) do |node, event|
+ execute_task(executor, options) do |node, event|
outputter.print_event(node, event)
end
when 'file' | 1 | require 'uri'
require 'optparse'
require 'benchmark'
require 'logger'
require 'json'
require 'bolt/node'
require 'bolt/version'
require 'bolt/error'
require 'bolt/executor'
require 'bolt/outputter'
require 'bolt/config'
require 'io/console'
module Bolt
class CLIError < Bolt::Error
attr_reader :error_code
def initialize(msg, error_code: 1)
super(msg, "bolt/cli-error")
@error_code = error_code
end
end
class CLIExit < StandardError; end
class CLI
BANNER = <<-HELP.freeze
Usage: bolt <subcommand> <action> [options]
Available subcommands:
bolt command run <command> Run a command remotely
bolt script run <script> Upload a local script and run it remotely
bolt task run <task> [params] Run a Puppet task
bolt plan run <plan> [params] Run a Puppet task plan
bolt file upload <src> <dest> Upload a local file
where [options] are:
HELP
TASK_HELP = <<-HELP.freeze
Usage: bolt task <action> <task> [options] [parameters]
Available actions are:
run Run a Puppet task
Parameters are of the form <parameter>=<value>.
Available options are:
HELP
COMMAND_HELP = <<-HELP.freeze
Usage: bolt command <action> <command> [options]
Available actions are:
run Run a command remotely
Available options are:
HELP
SCRIPT_HELP = <<-HELP.freeze
Usage: bolt script <action> <script> [[arg1] ... [argN]] [options]
Available actions are:
run Upload a local script and run it remotely
Available options are:
HELP
PLAN_HELP = <<-HELP.freeze
Usage: bolt plan <action> <plan> [options] [parameters]
Available actions are:
run Run a Puppet task plan
Parameters are of the form <parameter>=<value>.
Available options are:
HELP
FILE_HELP = <<-HELP.freeze
Usage: bolt file <action> [options]
Available actions are:
upload <src> <dest> Upload local file <src> to <dest> on each node
Available options are:
HELP
MODES = %w[command script task plan file].freeze
ACTIONS = %w[run upload download].freeze
TRANSPORTS = %w[ssh winrm pcp].freeze
BOLTLIB_PATH = File.join(__FILE__, '../../../modules')
attr_reader :parser, :config
attr_accessor :options
def initialize(argv)
@argv = argv
@options = {
nodes: []
}
@config = Bolt::Config.new
@parser = create_option_parser(@options)
end
def create_option_parser(results)
OptionParser.new('') do |opts|
opts.on(
'-n', '--nodes NODES',
'Node(s) to connect to in URI format [protocol://]host[:port]',
'Eg. --nodes bolt.puppet.com',
'Eg. --nodes localhost,ssh://nix.com:2222,winrm://windows.puppet.com',
"\n",
'* NODES can either be comma-separated, \'@<file>\' to read',
'* nodes from a file, or \'-\' to read from stdin',
'* Windows nodes must specify protocol with winrm://',
'* protocol is `ssh` by default, may be `ssh` or `winrm`',
'* port is `22` by default for SSH, `5985` for winrm (Optional)'
) do |nodes|
results[:nodes] += parse_nodes(nodes)
results[:nodes].uniq!
end
opts.on('-u', '--user USER',
"User to authenticate as (Optional)") do |user|
results[:user] = user
end
opts.on('-p', '--password [PASSWORD]',
'Password to authenticate with (Optional).',
'Omit the value to prompt for the password.') do |password|
if password.nil?
STDOUT.print "Please enter your password: "
results[:password] = STDIN.noecho(&:gets).chomp
STDOUT.puts
else
results[:password] = password
end
end
opts.on('--private-key KEY',
"Private ssh key to authenticate with (Optional)") do |key|
results[:key] = key
end
opts.on('--tmpdir DIR',
"The directory to upload and execute temporary files on the target(Optional)") do |tmpdir|
results[:tmpdir] = tmpdir
end
opts.on('-c', '--concurrency CONCURRENCY', Integer,
"Maximum number of simultaneous connections " \
"(Optional, defaults to 100)") do |concurrency|
results[:concurrency] = concurrency
end
opts.on('--connect-timeout TIMEOUT', Integer,
"Connection timeout (Optional)") do |timeout|
results[:connect_timeout] = timeout
end
opts.on('--modulepath MODULES',
"List of directories containing modules, " \
"separated by #{File::PATH_SEPARATOR}") do |modulepath|
results[:modulepath] = modulepath.split(File::PATH_SEPARATOR)
end
opts.on('--params PARAMETERS',
"Parameters to a task or plan") do |params|
results[:task_options] = parse_params(params)
end
opts.on('--format FORMAT',
"Output format to use: human or json") do |format|
results[:format] = format
end
opts.on('-k', '--insecure',
"Whether to connect insecurely ") do |insecure|
results[:insecure] = insecure
end
opts.on('--transport TRANSPORT', TRANSPORTS,
"Specify a default transport: #{TRANSPORTS.join(', ')}") do |t|
          results[:transport] = t
end
opts.on('--run-as USER',
"User to run as using privilege escalation") do |user|
          results[:run_as] = user
end
opts.on('--sudo [PROGRAM]',
"Program to execute for privilege escalation. " \
"Currently only sudo is supported.") do |program|
          results[:sudo] = program || 'sudo'
end
opts.on('--sudo-password [PASSWORD]',
'Password for privilege escalation') do |password|
if password.nil?
STDOUT.print "Please enter your privilege escalation password: "
results[:sudo_password] = STDIN.noecho(&:gets).chomp
STDOUT.puts
else
results[:sudo_password] = password
end
end
opts.on('--configfile CONFIG_PATH',
'Specify where to load the config file from') do |path|
results[:configfile] = path
end
opts.on_tail('--[no-]tty',
"Request a pseudo TTY on nodes that support it") do |tty|
results[:tty] = tty
end
opts.on_tail('-h', '--help', 'Display help') do |_|
results[:help] = true
end
opts.on_tail('--verbose', 'Display verbose logging') do |_|
results[:verbose] = true
end
opts.on_tail('--debug', 'Display debug logging') do |_|
results[:debug] = true
end
opts.on_tail('--version', 'Display the version') do |_|
puts Bolt::VERSION
raise Bolt::CLIExit
end
end
end
def parse
if @argv.empty?
options[:help] = true
end
remaining = handle_parser_errors do
parser.permute(@argv)
end
# Shortcut to handle help before other errors may be generated
options[:mode] = remaining.shift
if options[:mode] == 'help'
options[:help] = true
options[:mode] = remaining.shift
end
if options[:help]
print_help(options[:mode])
raise Bolt::CLIExit
end
@config.load_file(options[:configfile])
@config.update_from_cli(options)
@config.validate
# This section handles parsing non-flag options which are
      # mode-specific rather than part of the config
options[:action] = remaining.shift
options[:object] = remaining.shift
task_options, remaining = remaining.partition { |s| s =~ /.+=/ }
if options[:task_options]
unless task_options.empty?
raise Bolt::CLIError,
"Parameters must be specified through either the --params " \
"option or param=value pairs, not both"
end
else
options[:task_options] = Hash[task_options.map { |a| a.split('=', 2) }]
end
options[:leftovers] = remaining
validate(options)
options
end
def print_help(mode)
parser.banner = case mode
when 'task'
TASK_HELP
when 'command'
COMMAND_HELP
when 'script'
SCRIPT_HELP
when 'file'
FILE_HELP
when 'plan'
PLAN_HELP
else
BANNER
end
puts parser.help
end
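    # Accepts a comma- or whitespace-separated node list, an '@<file>'
    # reference, or '-' to read from stdin; returns unique node names.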
def parse_nodes(nodes)
list = get_arg_input(nodes)
list.split(/[[:space:],]+/).reject(&:empty?).uniq
end
def parse_params(params)
json = get_arg_input(params)
JSON.parse(json)
rescue JSON::ParserError => err
raise Bolt::CLIError, "Unable to parse --params value as JSON: #{err}"
end
def get_arg_input(value)
if value.start_with?('@')
file = value.sub(/^@/, '')
read_arg_file(file)
elsif value == '-'
STDIN.read
else
value
end
end
def read_arg_file(file)
File.read(file)
rescue StandardError => err
raise Bolt::CLIError, "Error attempting to read #{file}: #{err}"
end
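    # Checks the parsed options for a known mode/action combination and
    # for the arguments each mode requires, raising CLIError otherwise.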
def validate(options)
unless MODES.include?(options[:mode])
raise Bolt::CLIError,
"Expected subcommand '#{options[:mode]}' to be one of " \
"#{MODES.join(', ')}"
end
if options[:action].nil?
raise Bolt::CLIError,
"Expected an action of the form 'bolt #{options[:mode]} <action>'"
end
unless ACTIONS.include?(options[:action])
raise Bolt::CLIError,
"Expected action '#{options[:action]}' to be one of " \
"#{ACTIONS.join(', ')}"
end
if options[:mode] != 'file' && options[:mode] != 'script' &&
!options[:leftovers].empty?
raise Bolt::CLIError,
"Unknown argument(s) #{options[:leftovers].join(', ')}"
end
if %w[task plan].include?(options[:mode])
if options[:object].nil?
raise Bolt::CLIError, "Must specify a #{options[:mode]} to run"
end
# This may mean that we parsed a parameter as the object
unless options[:object] =~ /\A([a-z][a-z0-9_]*)?(::[a-z][a-z0-9_]*)*\Z/
raise Bolt::CLIError,
"Invalid #{options[:mode]} '#{options[:object]}'"
end
end
unless !options[:nodes].empty? || options[:mode] == 'plan'
raise Bolt::CLIError, "Option '--nodes' must be specified"
end
if %w[task plan].include?(options[:mode]) && @config[:modulepath].nil?
raise Bolt::CLIError,
"Option '--modulepath' must be specified when running" \
" a task or plan"
end
end
def handle_parser_errors
yield
rescue OptionParser::MissingArgument => e
raise Bolt::CLIError, "Option '#{e.args.first}' needs a parameter"
rescue OptionParser::InvalidOption => e
raise Bolt::CLIError, "Unknown argument '#{e.args.first}'"
end
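    # Dispatches the parsed options to the matching executor action and
    # prints per-node events as they arrive.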
def execute(options)
if options[:mode] == 'plan' || options[:mode] == 'task'
begin
require_relative '../../vendored/require_vendored'
rescue LoadError
raise Bolt::CLIError, "Puppet must be installed to execute tasks"
end
Puppet::Util::Log.newdestination(:console)
Puppet[:log_level] = if @config[:log_level] == Logger::DEBUG
'debug'
else
'notice'
end
end
executor = Bolt::Executor.new(@config)
if options[:mode] == 'plan'
execute_plan(executor, options)
else
nodes = executor.from_uris(options[:nodes])
results = nil
outputter.print_head
elapsed_time = Benchmark.realtime do
results =
case options[:mode]
when 'command'
executor.run_command(nodes, options[:object]) do |node, event|
outputter.print_event(node, event)
end
when 'script'
script = options[:object]
validate_file('script', script)
executor.run_script(
nodes, script, options[:leftovers]
) do |node, event|
outputter.print_event(node, event)
end
when 'task'
task_name = options[:object]
path, metadata = load_task_data(task_name, @config[:modulepath])
input_method = metadata['input_method']
input_method ||= 'both'
executor.run_task(
nodes, path, input_method, options[:task_options]
) do |node, event|
outputter.print_event(node, event)
end
when 'file'
src = options[:object]
dest = options[:leftovers].first
if dest.nil?
raise Bolt::CLIError, "A destination path must be specified"
end
validate_file('source file', src)
executor.file_upload(nodes, src, dest) do |node, event|
outputter.print_event(node, event)
end
end
end
outputter.print_summary(results, elapsed_time)
end
rescue Bolt::CLIError => e
outputter.fatal_error(e)
raise e
end
def execute_plan(executor, options)
# Plans return null here?
result = Puppet.override(bolt_executor: executor) do
run_plan(options[:object],
options[:task_options],
@config[:modulepath])
end
outputter.print_plan(result)
rescue Puppet::Error
raise Bolt::CLIError, "Exiting because of an error in Puppet code"
end
def validate_file(type, path)
if path.nil?
raise Bolt::CLIError, "A #{type} must be specified"
end
stat = file_stat(path)
if !stat.readable?
raise Bolt::CLIError, "The #{type} '#{path}' is unreadable"
elsif !stat.file?
raise Bolt::CLIError, "The #{type} '#{path}' is not a file"
end
rescue Errno::ENOENT
raise Bolt::CLIError, "The #{type} '#{path}' does not exist"
end
def file_stat(path)
File.stat(path)
end
def outputter
@outputter ||= Bolt::Outputter.for_format(@config[:format])
end
def load_task_data(name, modulepath)
module_name, file_name = name.split('::', 2)
file_name ||= 'init'
begin
env = Puppet::Node::Environment.create('bolt', modulepath)
Puppet.override(environments: Puppet::Environments::Static.new(env)) do
data = Puppet::InfoService::TaskInformationService.task_data(
env.name, module_name, name
)
file = data[:files].find { |f| File.basename(f, '.*') == file_name }
if file.nil?
raise Bolt::CLIError, "Failed to load task file for '#{name}'"
end
metadata =
if data[:metadata_file]
JSON.parse(File.read(data[:metadata_file]))
else
{}
end
[file, metadata]
end
rescue Puppet::Module::Task::TaskNotFound
raise Bolt::CLIError,
"Could not find task '#{name}' in module '#{module_name}'"
rescue Puppet::Module::MissingModule
# Generate message so we don't expose "bolt environment"
raise Bolt::CLIError, "Could not find module '#{module_name}'"
end
end
def run_plan(plan, args, modulepath)
Dir.mktmpdir('bolt') do |dir|
cli = []
Puppet::Settings::REQUIRED_APP_SETTINGS.each do |setting|
cli << "--#{setting}" << dir
end
Puppet.initialize_settings(cli)
Puppet::Pal.in_tmp_environment('bolt', modulepath: [BOLTLIB_PATH] + modulepath, facts: {}) do |pal|
pal.with_script_compiler do |compiler|
compiler.call_function('run_plan', plan, args)
end
end
end
end
end
end
| 1 | 7,145 | If the execute_task returns an `ExecutionResult`, the passing of a block would be unnecessary. Instead, the `ExecutionResult` could be presented to the user in some standardized way (iterating over the result of each node, etc.). | puppetlabs-bolt | rb |
@@ -199,7 +199,7 @@ func (c *client) InstallTunnelFlows(tunnelOFPort uint32) error {
func (c *client) Initialize() error {
// Initiate connections to target OFswitch, and create tables on the switch.
- if err := c.bridge.Connect(maxRetryForOFSwitch); err != nil {
+ if err := c.bridge.Connect(maxRetryForOFSwitch, make(chan struct{})); err != nil {
return err
}
| 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openflow
import (
"fmt"
"net"
"github.com/vmware-tanzu/antrea/pkg/agent/types"
binding "github.com/vmware-tanzu/antrea/pkg/ovs/openflow"
)
const maxRetryForOFSwitch = 5
//go:generate mockgen -copyright_file ../../../hack/boilerplate/license_header.raw.txt -destination testing/mock_client.go -package=testing github.com/vmware-tanzu/antrea/pkg/agent/openflow Client
// Client is the interface to program OVS flows for entity connectivity of Antrea.
// TODO: flow sync (e.g. at agent restart), retry at failure, garbage collection mechanisms
type Client interface {
// Initialize sets up all basic flows on the specific OVS bridge.
Initialize() error
// InstallGatewayFlows sets up flows related to an OVS gateway port, the gateway must exist.
InstallGatewayFlows(gatewayAddr net.IP, gatewayMAC net.HardwareAddr, gatewayOFPort uint32) error
// InstallClusterServiceCIDRFlows sets up the appropriate flows so that traffic can reach
// the different Services running in the Cluster. This method needs to be invoked once with
// the Cluster Service CIDR as a parameter.
InstallClusterServiceCIDRFlows(serviceNet *net.IPNet, gatewayOFPort uint32) error
// InstallTunnelFlows sets up flows related to an OVS tunnel port, the tunnel port must exist.
InstallTunnelFlows(tunnelOFPort uint32) error
// InstallNodeFlows should be invoked when a connection to a remote Node is going to be set
// up. The hostname is used to identify the added flows. Calls to InstallNodeFlows are
// idempotent. Concurrent calls to InstallNodeFlows and / or UninstallNodeFlows are
// supported as long as they are all for different hostnames.
InstallNodeFlows(hostname string, localGatewayMAC net.HardwareAddr, peerGatewayIP net.IP, peerPodCIDR net.IPNet, tunnelPeerAddr net.IP) error
// UninstallNodeFlows removes the connection to the remote Node specified with the
// hostname. UninstallNodeFlows will do nothing if no connection to the host was established.
UninstallNodeFlows(hostname string) error
// InstallPodFlows should be invoked when a connection to a Pod on current Node. The
// containerID is used to identify the added flows. Calls to InstallPodFlows are
// idempotent. Concurrent calls to InstallPodFlows and / or UninstallPodFlows are
// supported as long as they are all for different containerIDs.
InstallPodFlows(containerID string, podInterfaceIP net.IP, podInterfaceMAC, gatewayMAC net.HardwareAddr, ofPort uint32) error
// UninstallPodFlows removes the connection to the local Pod specified with the
// containerID. UninstallPodFlows will do nothing if no connection to the Pod was established.
UninstallPodFlows(containerID string) error
// GetFlowTableStatus should return an array of flow table status, all existing flow tables should be included in the list.
GetFlowTableStatus() []binding.TableStatus
// InstallPolicyRuleFlows installs flows for a new NetworkPolicy rule. Rule should include all fields in the
// NetworkPolicy rule. Each ingress/egress policy rule installs Openflow entries on two tables, one for
// ruleTable and the other for dropTable. If a packet does not pass the ruleTable, it will be dropped by the
// dropTable.
InstallPolicyRuleFlows(rule *types.PolicyRule) error
// UninstallPolicyRuleFlows removes the Openflow entry relevant to the specified NetworkPolicy rule.
// UninstallPolicyRuleFlows will do nothing if no Openflow entry for the rule is installed.
UninstallPolicyRuleFlows(ruleID uint32) error
// AddPolicyRuleAddress adds one or multiple addresses to the specified NetworkPolicy rule. If addrType is true, the
// addresses are added to PolicyRule.From, else to PolicyRule.To.
AddPolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address) error
// DeletePolicyRuleAddress removes addresses from the specified NetworkPolicy rule. If addrType is srcAddress, the addresses
// are removed from PolicyRule.From, else from PolicyRule.To.
DeletePolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address) error
// Disconnect disconnects the connection between client and OFSwitch.
Disconnect() error
}
// GetFlowTableStatus returns an array of flow table status.
func (c *client) GetFlowTableStatus() []binding.TableStatus {
return c.bridge.DumpTableStatus()
}
// addMissingFlows adds any flow from flows which is not currently in the flow cache. The function
// returns immediately in case of error when adding a flow. If a flow is added succesfully, it is
// added to the flow cache. If the flow cache has not been initialized yet (i.e. there is no
// flowCacheKey key in the cache map), we create it first.
func (c *client) addMissingFlows(cache *flowCategoryCache, flowCacheKey string, flows []binding.Flow) error {
// initialize flow cache if needed
fCacheI, _ := cache.LoadOrStore(flowCacheKey, flowCache{})
fCache := fCacheI.(flowCache)
for _, flow := range flows {
flowKey := flow.MatchString()
if _, ok := fCache[flowKey]; ok {
continue
}
if err := c.flowOperations.Add(flow); err != nil {
return err
}
fCache[flow.MatchString()] = flow
}
return nil
}
// deleteFlows deletes all the flows in the flow cache indexed by the provided flowCacheKey.
func (c *client) deleteFlows(cache *flowCategoryCache, flowCacheKey string) error {
fCacheI, ok := cache.Load(flowCacheKey)
if !ok {
// no matching flows found in the cache
return nil
}
fCache := fCacheI.(flowCache)
// delete flowCache from the top-level cache if all flows were successfully deleted
defer func() {
if len(fCache) == 0 {
cache.Delete(flowCacheKey)
}
}()
for flowKey, flow := range fCache {
if err := c.flowOperations.Delete(flow); err != nil {
return err
}
delete(fCache, flowKey)
}
return nil
}
func (c *client) InstallNodeFlows(hostname string, localGatewayMAC net.HardwareAddr, peerGatewayIP net.IP, peerPodCIDR net.IPNet, tunnelPeerAddr net.IP) error {
flows := []binding.Flow{
c.arpResponderFlow(peerGatewayIP),
c.l3FwdFlowToRemote(localGatewayMAC, peerPodCIDR, tunnelPeerAddr),
}
return c.addMissingFlows(c.nodeFlowCache, hostname, flows)
}
func (c *client) UninstallNodeFlows(hostname string) error {
return c.deleteFlows(c.nodeFlowCache, hostname)
}
func (c *client) InstallPodFlows(containerID string, podInterfaceIP net.IP, podInterfaceMAC, gatewayMAC net.HardwareAddr, ofPort uint32) error {
flows := []binding.Flow{
c.podClassifierFlow(ofPort),
c.podIPSpoofGuardFlow(podInterfaceIP, podInterfaceMAC, ofPort),
c.arpSpoofGuardFlow(podInterfaceIP, podInterfaceMAC, ofPort),
c.l2ForwardCalcFlow(podInterfaceMAC, ofPort),
c.l3FlowsToPod(gatewayMAC, podInterfaceIP, podInterfaceMAC),
}
return c.addMissingFlows(c.podFlowCache, containerID, flows)
}
func (c *client) UninstallPodFlows(containerID string) error {
return c.deleteFlows(c.podFlowCache, containerID)
}
func (c *client) InstallClusterServiceCIDRFlows(serviceNet *net.IPNet, gatewayOFPort uint32) error {
return c.flowOperations.Add(c.serviceCIDRDNATFlow(serviceNet, gatewayOFPort))
}
func (c *client) InstallGatewayFlows(gatewayAddr net.IP, gatewayMAC net.HardwareAddr, gatewayOFPort uint32) error {
if err := c.flowOperations.Add(c.gatewayClassifierFlow(gatewayOFPort)); err != nil {
return err
} else if err := c.flowOperations.Add(c.gatewayIPSpoofGuardFlow(gatewayOFPort)); err != nil {
return err
} else if err := c.flowOperations.Add(c.gatewayARPSpoofGuardFlow(gatewayOFPort)); err != nil {
return err
} else if err := c.flowOperations.Add(c.l3ToGatewayFlow(gatewayAddr, gatewayMAC)); err != nil {
return err
} else if err := c.flowOperations.Add(c.l2ForwardCalcFlow(gatewayMAC, gatewayOFPort)); err != nil {
return err
}
return nil
}
func (c *client) InstallTunnelFlows(tunnelOFPort uint32) error {
if err := c.flowOperations.Add(c.tunnelClassifierFlow(tunnelOFPort)); err != nil {
return err
} else if err := c.flowOperations.Add(c.l2ForwardCalcFlow(globalVirtualMAC, tunnelOFPort)); err != nil {
return err
}
return nil
}
func (c *client) Initialize() error {
// Initiate connections to target OFswitch, and create tables on the switch.
if err := c.bridge.Connect(maxRetryForOFSwitch); err != nil {
return err
}
for _, flow := range c.defaultFlows() {
if err := c.flowOperations.Add(flow); err != nil {
return fmt.Errorf("failed to install default flows: %v", err)
}
}
if err := c.flowOperations.Add(c.arpNormalFlow()); err != nil {
return fmt.Errorf("failed to install arp normal flow: %v", err)
}
if err := c.flowOperations.Add(c.l2ForwardOutputFlow()); err != nil {
return fmt.Errorf("failed to install l2 forward output flows: %v", err)
}
for _, flow := range c.connectionTrackFlows() {
if err := c.flowOperations.Add(flow); err != nil {
return fmt.Errorf("failed to install connection track flows: %v", err)
}
}
for _, flow := range c.establishedConnectionFlows() {
if err := flow.Add(); err != nil {
return fmt.Errorf("failed to install flows to skip established connections: %v", err)
}
}
return nil
}
| 1 | 9,450 | Not sure why we make a new channel here. If we don't use this channel, how about creating it inside the `Connect`? | antrea-io-antrea | go |
@@ -1001,6 +1001,12 @@ void RaftPart::processAppendLogResponses(const AppendLogResponses& resps,
LOG_EVERY_N(WARNING, 100) << idStr_ << "Only " << numSucceeded
<< " hosts succeeded, Need to try again";
usleep(1000);
+ {
+ std::lock_guard<std::mutex> g(raftLock_);
+        // The wal could have been rolled back between two retry cycles; if we kept using the
+        // old lastLogId, we might always get E_RAFT_NO_WAL_FOUND
+ lastLogId = std::min(wal_->lastLogId(), lastLogId);
+ }
replicateLogs(eb, std::move(iter), currTerm, lastLogId, committedId, prevLogTerm, prevLogId);
}
} | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "kvstore/raftex/RaftPart.h"
#include <folly/executors/IOThreadPoolExecutor.h>
#include <folly/gen/Base.h>
#include <folly/io/async/EventBaseManager.h>
#include <thrift/lib/cpp/util/EnumUtils.h>
#include "common/base/Base.h"
#include "common/base/CollectNSucceeded.h"
#include "common/base/SlowOpTracker.h"
#include "common/network/NetworkUtils.h"
#include "common/stats/StatsManager.h"
#include "common/thread/NamedThread.h"
#include "common/thrift/ThriftClientManager.h"
#include "common/time/WallClock.h"
#include "common/utils/LogStrListIterator.h"
#include "interface/gen-cpp2/RaftexServiceAsyncClient.h"
#include "kvstore/LogEncoder.h"
#include "kvstore/raftex/Host.h"
#include "kvstore/raftex/RaftLogIterator.h"
#include "kvstore/stats/KVStats.h"
#include "kvstore/wal/FileBasedWal.h"
DEFINE_uint32(raft_heartbeat_interval_secs, 5, "Seconds between each heartbeat");
DEFINE_uint64(raft_snapshot_timeout, 60 * 5, "Max seconds between two snapshot requests");
DEFINE_uint32(max_batch_size, 256, "The max number of logs in a batch");
DEFINE_bool(trace_raft, false, "Enable trace one raft request");
DECLARE_int32(wal_ttl);
DECLARE_int64(wal_file_size);
DECLARE_int32(wal_buffer_size);
DECLARE_bool(wal_sync);
namespace nebula {
namespace raftex {
using nebula::network::NetworkUtils;
using nebula::thrift::ThriftClientManager;
using nebula::wal::FileBasedWal;
using nebula::wal::FileBasedWalInfo;
using nebula::wal::FileBasedWalPolicy;
using OpProcessor = folly::Function<folly::Optional<std::string>(AtomicOp op)>;
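/**
 * AppendLogsIterator wraps a batch of cached logs for one round of replication. A succeeded
 * atomic op may only lead a batch: iteration stops when running into another atomic op or
 * right after a COMMAND log, and resume() continues with the remaining logs afterwards.
 */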
class AppendLogsIterator final : public LogIterator {
public:
AppendLogsIterator(LogID firstLogId, TermID termId, RaftPart::LogCache logs, OpProcessor opCB)
: firstLogId_(firstLogId),
termId_(termId),
logId_(firstLogId),
logs_(std::move(logs)),
opCB_(std::move(opCB)) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
AppendLogsIterator(const AppendLogsIterator&) = delete;
AppendLogsIterator(AppendLogsIterator&&) = default;
AppendLogsIterator& operator=(const AppendLogsIterator&) = delete;
AppendLogsIterator& operator=(AppendLogsIterator&&) = default;
bool leadByAtomicOp() const {
return leadByAtomicOp_;
}
bool hasNonAtomicOpLogs() const {
return hasNonAtomicOpLogs_;
}
LogID firstLogId() const {
return firstLogId_;
}
LogID lastLogId() const {
return firstLogId_ + logs_.size() - 1;
}
  // Return true if the current log is an AtomicOp, otherwise return false
bool processAtomicOp() {
while (idx_ < logs_.size()) {
auto& tup = logs_.at(idx_);
auto logType = std::get<1>(tup);
if (logType != LogType::ATOMIC_OP) {
        // Not an AtomicOp
return false;
}
// Process AtomicOp log
CHECK(!!opCB_);
opResult_ = opCB_(std::move(std::get<3>(tup)));
if (opResult_.hasValue()) {
// AtomicOp Succeeded
return true;
} else {
// AtomicOp failed, move to the next log, but do not increment the
// logId_
++idx_;
}
}
// Reached the end
return false;
}
LogIterator& operator++() override {
++idx_;
++logId_;
if (idx_ < logs_.size()) {
currLogType_ = logType();
valid_ = currLogType_ != LogType::ATOMIC_OP;
if (valid_) {
hasNonAtomicOpLogs_ = true;
}
valid_ = valid_ && lastLogType_ != LogType::COMMAND;
lastLogType_ = currLogType_;
} else {
valid_ = false;
}
return *this;
}
// The iterator becomes invalid when exhausting the logs
  // **OR** running into an AtomicOp log
bool valid() const override {
return valid_;
}
LogID logId() const override {
DCHECK(valid());
return logId_;
}
TermID logTerm() const override {
return termId_;
}
ClusterID logSource() const override {
DCHECK(valid());
return std::get<0>(logs_.at(idx_));
}
folly::StringPiece logMsg() const override {
DCHECK(valid());
if (currLogType_ == LogType::ATOMIC_OP) {
CHECK(opResult_.hasValue());
return opResult_.value();
} else {
return std::get<2>(logs_.at(idx_));
}
}
// Return true when there is no more log left for processing
bool empty() const {
return idx_ >= logs_.size();
}
// Resume the iterator so that we can continue to process the remaining logs
void resume() {
CHECK(!valid_);
if (!empty()) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
}
LogType logType() const {
return std::get<1>(logs_.at(idx_));
}
private:
size_t idx_{0};
bool leadByAtomicOp_{false};
bool hasNonAtomicOpLogs_{false};
bool valid_{true};
LogType lastLogType_{LogType::NORMAL};
LogType currLogType_{LogType::NORMAL};
folly::Optional<std::string> opResult_;
LogID firstLogId_;
TermID termId_;
LogID logId_;
RaftPart::LogCache logs_;
OpProcessor opCB_;
};
/********************************************************
*
* Implementation of RaftPart
*
*******************************************************/
RaftPart::RaftPart(
ClusterID clusterId,
GraphSpaceID spaceId,
PartitionID partId,
HostAddr localAddr,
const folly::StringPiece walRoot,
std::shared_ptr<folly::IOThreadPoolExecutor> pool,
std::shared_ptr<thread::GenericThreadPool> workers,
std::shared_ptr<folly::Executor> executor,
std::shared_ptr<SnapshotManager> snapshotMan,
std::shared_ptr<thrift::ThriftClientManager<cpp2::RaftexServiceAsyncClient>> clientMan,
std::shared_ptr<kvstore::DiskManager> diskMan)
: idStr_{folly::stringPrintf(
"[Port: %d, Space: %d, Part: %d] ", localAddr.port, spaceId, partId)},
clusterId_{clusterId},
spaceId_{spaceId},
partId_{partId},
addr_{localAddr},
status_{Status::STARTING},
role_{Role::FOLLOWER},
leader_{"", 0},
ioThreadPool_{pool},
bgWorkers_{workers},
executor_(executor),
snapshot_(snapshotMan),
clientMan_(clientMan),
diskMan_(diskMan) {
FileBasedWalPolicy policy;
policy.fileSize = FLAGS_wal_file_size;
policy.bufferSize = FLAGS_wal_buffer_size;
policy.sync = FLAGS_wal_sync;
FileBasedWalInfo info;
info.idStr_ = idStr_;
info.spaceId_ = spaceId_;
info.partId_ = partId_;
wal_ = FileBasedWal::getWal(
walRoot,
std::move(info),
std::move(policy),
[this](LogID logId, TermID logTermId, ClusterID logClusterId, const std::string& log) {
return this->preProcessLog(logId, logTermId, logClusterId, log);
},
diskMan);
logs_.reserve(FLAGS_max_batch_size);
CHECK(!!executor_) << idStr_ << "Should not be nullptr";
}
RaftPart::~RaftPart() {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition has stopped
CHECK(status_ == Status::STOPPED);
LOG(INFO) << idStr_ << " The part has been destroyed...";
}
const char* RaftPart::roleStr(Role role) const {
switch (role) {
case Role::LEADER:
return "Leader";
case Role::FOLLOWER:
return "Follower";
case Role::CANDIDATE:
return "Candidate";
case Role::LEARNER:
return "Learner";
default:
LOG(FATAL) << idStr_ << "Invalid role";
}
return nullptr;
}
void RaftPart::start(std::vector<HostAddr>&& peers, bool asLearner) {
std::lock_guard<std::mutex> g(raftLock_);
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
term_ = lastLogTerm_;
// Set the quorum number
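  // quorum_ counts only the *other* members whose acknowledgement is needed: with
  // peers.size() + 1 replicas in total, quorum_ acks from peers plus ourselves form a
  // strict majority.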
quorum_ = (peers.size() + 1) / 2;
auto logIdAndTerm = lastCommittedLogId();
committedLogId_ = logIdAndTerm.first;
committedLogTerm_ = logIdAndTerm.second;
if (lastLogId_ < committedLogId_) {
LOG(INFO) << idStr_ << "Reset lastLogId " << lastLogId_ << " to be the committedLogId "
<< committedLogId_;
lastLogId_ = committedLogId_;
lastLogTerm_ = committedLogTerm_;
wal_->reset();
}
LOG(INFO) << idStr_ << "There are " << peers.size() << " peer hosts, and total "
<< peers.size() + 1 << " copies. The quorum is " << quorum_ + 1 << ", as learner "
<< asLearner << ", lastLogId " << lastLogId_ << ", lastLogTerm " << lastLogTerm_
<< ", committedLogId " << committedLogId_ << ", committedLogTerm " << committedLogTerm_
<< ", term " << term_;
// Start all peer hosts
for (auto& addr : peers) {
LOG(INFO) << idStr_ << "Add peer " << addr;
auto hostPtr = std::make_shared<Host>(addr, shared_from_this());
hosts_.emplace_back(hostPtr);
}
// Change the status
status_ = Status::RUNNING;
if (asLearner) {
role_ = Role::LEARNER;
}
leader_ = HostAddr("", 0);
startTimeMs_ = time::WallClock::fastNowInMilliSec();
// Set up a leader election task
size_t delayMS = 100 + folly::Random::rand32(900);
bgWorkers_->addDelayTask(delayMS, [self = shared_from_this(), startTime = startTimeMs_] {
self->statusPolling(startTime);
});
}
void RaftPart::stop() {
VLOG(2) << idStr_ << "Stopping the partition";
decltype(hosts_) hosts;
{
std::lock_guard<std::mutex> lck(raftLock_);
status_ = Status::STOPPED;
leader_ = {"", 0};
role_ = Role::FOLLOWER;
hosts = std::move(hosts_);
}
for (auto& h : hosts) {
h->stop();
}
VLOG(2) << idStr_ << "Invoked stop() on all peer hosts";
for (auto& h : hosts) {
VLOG(2) << idStr_ << "Waiting " << h->idStr() << " to stop";
h->waitForStop();
VLOG(2) << idStr_ << h->idStr() << "has stopped";
}
hosts.clear();
LOG(INFO) << idStr_ << "Partition has been stopped";
}
nebula::cpp2::ErrorCode RaftPart::canAppendLogs() {
DCHECK(!raftLock_.try_lock());
if (UNLIKELY(status_ != Status::RUNNING)) {
LOG(ERROR) << idStr_ << "The partition is not running";
return nebula::cpp2::ErrorCode::E_RAFT_STOPPED;
}
if (UNLIKELY(role_ != Role::LEADER)) {
LOG_EVERY_N(WARNING, 1000) << idStr_ << "The partition is not a leader";
return nebula::cpp2::ErrorCode::E_LEADER_CHANGED;
}
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
nebula::cpp2::ErrorCode RaftPart::canAppendLogs(TermID termId) {
DCHECK(!raftLock_.try_lock());
nebula::cpp2::ErrorCode rc = canAppendLogs();
if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) {
return rc;
}
if (UNLIKELY(term_ != termId)) {
VLOG(2) << idStr_ << "Term has been updated, origin " << termId << ", new " << term_;
return nebula::cpp2::ErrorCode::E_RAFT_TERM_OUT_OF_DATE;
}
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
void RaftPart::addLearner(const HostAddr& addr) {
CHECK(!raftLock_.try_lock());
if (addr == addr_) {
LOG(INFO) << idStr_ << "I am learner!";
return;
}
auto it = std::find_if(
hosts_.begin(), hosts_.end(), [&addr](const auto& h) { return h->address() == addr; });
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(addr, shared_from_this(), true));
LOG(INFO) << idStr_ << "Add learner " << addr;
} else {
LOG(INFO) << idStr_ << "The host " << addr << " has been existed as "
<< ((*it)->isLearner() ? " learner " : " group member");
}
}
void RaftPart::preProcessTransLeader(const HostAddr& target) {
CHECK(!raftLock_.try_lock());
LOG(INFO) << idStr_ << "Pre process transfer leader to " << target;
switch (role_) {
case Role::FOLLOWER: {
if (target != addr_ && target != HostAddr("", 0)) {
LOG(INFO) << idStr_ << "I am follower, just wait for the new leader.";
} else {
LOG(INFO) << idStr_ << "I will be the new leader, trigger leader election now!";
bgWorkers_->addTask([self = shared_from_this()] {
{
std::lock_guard<std::mutex> lck(self->raftLock_);
self->role_ = Role::CANDIDATE;
self->leader_ = HostAddr("", 0);
}
// skip prevote for transfer leader
self->leaderElection(false).get();
});
}
break;
}
default: {
LOG(INFO) << idStr_ << "My role is " << roleStr(role_)
<< ", so do nothing when pre process transfer leader";
break;
}
}
}
void RaftPart::commitTransLeader(const HostAddr& target) {
bool needToUnlock = raftLock_.try_lock();
LOG(INFO) << idStr_ << "Commit transfer leader to " << target;
switch (role_) {
case Role::LEADER: {
if (target != addr_ && !hosts_.empty()) {
auto iter = std::find_if(
hosts_.begin(), hosts_.end(), [](const auto& h) { return !h->isLearner(); });
if (iter != hosts_.end()) {
lastMsgRecvDur_.reset();
role_ = Role::FOLLOWER;
leader_ = HostAddr("", 0);
for (auto& host : hosts_) {
host->pause();
}
LOG(INFO) << idStr_ << "Give up my leadership!";
}
} else {
LOG(INFO) << idStr_ << "I am already the leader!";
}
break;
}
case Role::FOLLOWER:
case Role::CANDIDATE: {
LOG(INFO) << idStr_ << "I am " << roleStr(role_) << ", just wait for the new leader!";
break;
}
case Role::LEARNER: {
LOG(INFO) << idStr_ << "I am learner, not in the raft group, skip the log";
break;
}
}
if (needToUnlock) {
raftLock_.unlock();
}
}
void RaftPart::updateQuorum() {
CHECK(!raftLock_.try_lock());
int32_t total = 0;
for (auto& h : hosts_) {
if (!h->isLearner()) {
total++;
}
}
quorum_ = (total + 1) / 2;
}
void RaftPart::addPeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
if (role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am learner, promote myself to be follower";
role_ = Role::FOLLOWER;
updateQuorum();
} else {
LOG(INFO) << idStr_ << "I am already in the raft group!";
}
return;
}
auto it = std::find_if(
hosts_.begin(), hosts_.end(), [&peer](const auto& h) { return h->address() == peer; });
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(peer, shared_from_this()));
updateQuorum();
LOG(INFO) << idStr_ << "Add peer " << peer;
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The host " << peer << " has been existed as learner, promote it!";
(*it)->setLearner(false);
updateQuorum();
} else {
LOG(INFO) << idStr_ << "The host " << peer << " has been existed as follower!";
}
}
}
void RaftPart::removePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
// The part will be removed in REMOVE_PART_ON_SRC phase
LOG(INFO) << idStr_ << "Remove myself from the raft group.";
return;
}
auto it = std::find_if(
hosts_.begin(), hosts_.end(), [&peer](const auto& h) { return h->address() == peer; });
if (it == hosts_.end()) {
LOG(INFO) << idStr_ << "The peer " << peer << " not exist!";
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The peer is learner, remove it directly!";
hosts_.erase(it);
return;
}
hosts_.erase(it);
updateQuorum();
LOG(INFO) << idStr_ << "Remove peer " << peer;
}
}
nebula::cpp2::ErrorCode RaftPart::checkPeer(const HostAddr& candidate) {
CHECK(!raftLock_.try_lock());
auto hosts = followers();
auto it = std::find_if(hosts.begin(), hosts.end(), [&candidate](const auto& h) {
return h->address() == candidate;
});
if (it == hosts.end()) {
LOG(INFO) << idStr_ << "The candidate " << candidate << " is not in my peers";
return nebula::cpp2::ErrorCode::E_RAFT_INVALID_PEER;
}
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
void RaftPart::addListenerPeer(const HostAddr& listener) {
std::lock_guard<std::mutex> guard(raftLock_);
if (listener == addr_) {
LOG(INFO) << idStr_ << "I am already in the raft group";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&listener](const auto& h) {
return h->address() == listener;
});
if (it == hosts_.end()) {
// Add listener as a raft learner
hosts_.emplace_back(std::make_shared<Host>(listener, shared_from_this(), true));
listeners_.emplace(listener);
LOG(INFO) << idStr_ << "Add listener " << listener;
} else {
LOG(INFO) << idStr_ << "The listener " << listener << " has joined raft group before";
}
}
void RaftPart::removeListenerPeer(const HostAddr& listener) {
std::lock_guard<std::mutex> guard(raftLock_);
if (listener == addr_) {
LOG(INFO) << idStr_ << "Remove myself from the raft group";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&listener](const auto& h) {
return h->address() == listener;
});
if (it == hosts_.end()) {
LOG(INFO) << idStr_ << "The listener " << listener << " not found";
} else {
hosts_.erase(it);
listeners_.erase(listener);
LOG(INFO) << idStr_ << "Remove listener " << listener;
}
}
void RaftPart::preProcessRemovePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (role_ == Role::LEADER) {
LOG(INFO) << idStr_ << "I am leader, skip remove peer in preProcessLog";
return;
}
removePeer(peer);
}
void RaftPart::commitRemovePeer(const HostAddr& peer) {
bool needToUnlock = raftLock_.try_lock();
SCOPE_EXIT {
if (needToUnlock) {
raftLock_.unlock();
}
};
if (role_ == Role::FOLLOWER || role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am " << roleStr(role_) << ", skip remove peer in commit";
return;
}
CHECK(Role::LEADER == role_);
removePeer(peer);
}
folly::Future<nebula::cpp2::ErrorCode> RaftPart::appendAsync(ClusterID source, std::string log) {
if (source < 0) {
source = clusterId_;
}
return appendLogAsync(source, LogType::NORMAL, std::move(log));
}
folly::Future<nebula::cpp2::ErrorCode> RaftPart::atomicOpAsync(AtomicOp op) {
return appendLogAsync(clusterId_, LogType::ATOMIC_OP, "", std::move(op));
}
folly::Future<nebula::cpp2::ErrorCode> RaftPart::sendCommandAsync(std::string log) {
return appendLogAsync(clusterId_, LogType::COMMAND, std::move(log));
}
folly::Future<nebula::cpp2::ErrorCode> RaftPart::appendLogAsync(ClusterID source,
LogType logType,
std::string log,
AtomicOp op) {
if (blocking_) {
// No need to block heartbeats and empty log.
if ((logType == LogType::NORMAL && !log.empty()) || logType == LogType::ATOMIC_OP) {
return nebula::cpp2::ErrorCode::E_RAFT_WRITE_BLOCKED;
}
}
LogCache swappedOutLogs;
auto retFuture = folly::Future<nebula::cpp2::ErrorCode>::makeEmpty();
if (bufferOverFlow_) {
LOG_EVERY_N(WARNING, 100) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
return nebula::cpp2::ErrorCode::E_RAFT_BUFFER_OVERFLOW;
}
{
std::lock_guard<std::mutex> lck(logsLock_);
VLOG(2) << idStr_ << "Checking whether buffer overflow";
if (logs_.size() >= FLAGS_max_batch_size) {
// Buffer is full
LOG(WARNING) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
bufferOverFlow_ = true;
return nebula::cpp2::ErrorCode::E_RAFT_BUFFER_OVERFLOW;
}
VLOG(2) << idStr_ << "Appending logs to the buffer";
// Append new logs to the buffer
DCHECK_GE(source, 0);
logs_.emplace_back(source, logType, std::move(log), std::move(op));
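    // Each log type obtains its future from the caching promise with different semantics:
    // ATOMIC_OP gets a dedicated single future; COMMAND rolls the shared future so that
    // later logs wait on a fresh one; NORMAL logs in the same batch share one future.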
switch (logType) {
case LogType::ATOMIC_OP:
retFuture = cachingPromise_.getSingleFuture();
break;
case LogType::COMMAND:
retFuture = cachingPromise_.getAndRollSharedFuture();
break;
case LogType::NORMAL:
retFuture = cachingPromise_.getSharedFuture();
break;
}
bool expected = false;
if (replicatingLogs_.compare_exchange_strong(expected, true)) {
// We need to send logs to all followers
VLOG(2) << idStr_ << "Preparing to send AppendLog request";
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
std::swap(swappedOutLogs, logs_);
bufferOverFlow_ = false;
} else {
VLOG(2) << idStr_ << "Another AppendLogs request is ongoing, just return";
return retFuture;
}
}
LogID firstId = 0;
TermID termId = 0;
nebula::cpp2::ErrorCode res;
{
std::lock_guard<std::mutex> g(raftLock_);
res = canAppendLogs();
if (res == nebula::cpp2::ErrorCode::SUCCEEDED) {
firstId = lastLogId_ + 1;
termId = term_;
}
}
if (!checkAppendLogResult(res)) {
    // Most likely failed because the partition is not the leader
LOG_EVERY_N(WARNING, 1000) << idStr_ << "Cannot append logs, clean the buffer";
return res;
}
// Replicate buffered logs to all followers
// Replication will happen on a separate thread and will block
// until majority accept the logs, the leadership changes, or
// the partition stops
VLOG(2) << idStr_ << "Calling appendLogsInternal()";
AppendLogsIterator it(
firstId,
termId,
std::move(swappedOutLogs),
[this](AtomicOp opCB) -> folly::Optional<std::string> {
CHECK(opCB != nullptr);
auto opRet = opCB();
if (!opRet.hasValue()) {
// Failed
sendingPromise_.setOneSingleValue(nebula::cpp2::ErrorCode::E_RAFT_ATOMIC_OP_FAILED);
}
return opRet;
});
appendLogsInternal(std::move(it), termId);
return retFuture;
}
void RaftPart::appendLogsInternal(AppendLogsIterator iter, TermID termId) {
TermID currTerm = 0;
LogID prevLogId = 0;
TermID prevLogTerm = 0;
LogID committed = 0;
LogID lastId = 0;
if (iter.valid()) {
VLOG(2) << idStr_ << "Ready to append logs from id " << iter.logId() << " (Current term is "
<< currTerm << ")";
} else {
LOG(ERROR) << idStr_ << "Only happened when Atomic op failed";
replicatingLogs_ = false;
return;
}
nebula::cpp2::ErrorCode res = nebula::cpp2::ErrorCode::SUCCEEDED;
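  // The do { ... } while (false) block lets the error paths `break` out and release
  // raftLock_ before the result is checked below.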
do {
std::lock_guard<std::mutex> g(raftLock_);
res = canAppendLogs(termId);
if (res != nebula::cpp2::ErrorCode::SUCCEEDED) {
break;
}
currTerm = term_;
prevLogId = lastLogId_;
prevLogTerm = lastLogTerm_;
committed = committedLogId_;
// Step 1: Write WAL
SlowOpTracker tracker;
if (!wal_->appendLogs(iter)) {
LOG_EVERY_N(WARNING, 100) << idStr_ << "Failed to write into WAL";
res = nebula::cpp2::ErrorCode::E_RAFT_WAL_FAIL;
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
break;
}
lastId = wal_->lastLogId();
if (tracker.slow()) {
tracker.output(idStr_, folly::stringPrintf("Write WAL, total %ld", lastId - prevLogId + 1));
}
VLOG(2) << idStr_ << "Succeeded writing logs [" << iter.firstLogId() << ", " << lastId
<< "] to WAL";
} while (false);
if (!checkAppendLogResult(res)) {
LOG_EVERY_N(WARNING, 100) << idStr_ << "Failed to write wal";
return;
}
// Step 2: Replicate to followers
auto* eb = ioThreadPool_->getEventBase();
replicateLogs(eb, std::move(iter), currTerm, lastId, committed, prevLogTerm, prevLogId);
return;
}
void RaftPart::replicateLogs(folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId) {
using namespace folly; // NOLINT since the fancy overload of | operator
decltype(hosts_) hosts;
nebula::cpp2::ErrorCode res = nebula::cpp2::ErrorCode::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
res = canAppendLogs(currTerm);
if (res != nebula::cpp2::ErrorCode::SUCCEEDED) {
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
break;
}
hosts = hosts_;
} while (false);
if (!checkAppendLogResult(res)) {
LOG(WARNING) << idStr_ << "replicateLogs failed because of not leader or term changed";
return;
}
LOG_IF(INFO, FLAGS_trace_raft) << idStr_ << "About to replicate logs in range ["
<< iter.firstLogId() << ", " << lastLogId << "] to all peer hosts";
lastMsgSentDur_.reset();
SlowOpTracker tracker;
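  // collectNSucceeded resolves once quorum_ non-learner hosts have acknowledged the logs;
  // the result evaluator below does not count responses from learners.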
collectNSucceeded(gen::from(hosts) |
gen::map([self = shared_from_this(),
eb,
currTerm,
lastLogId,
prevLogId,
prevLogTerm,
committedId](std::shared_ptr<Host> hostPtr) {
VLOG(2) << self->idStr_ << "Appending logs to " << hostPtr->idStr();
return via(eb, [=]() -> Future<cpp2::AppendLogResponse> {
return hostPtr->appendLogs(
eb, currTerm, lastLogId, committedId, prevLogTerm, prevLogId);
});
}) |
gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts](size_t index, cpp2::AppendLogResponse& resp) {
return resp.get_error_code() == nebula::cpp2::ErrorCode::SUCCEEDED &&
!hosts[index]->isLearner();
})
.via(executor_.get())
.then([self = shared_from_this(),
eb,
it = std::move(iter),
currTerm,
lastLogId,
committedId,
prevLogId,
prevLogTerm,
pHosts = std::move(hosts),
tracker](folly::Try<AppendLogResponses>&& result) mutable {
VLOG(2) << self->idStr_ << "Received enough response";
CHECK(!result.hasException());
if (tracker.slow()) {
tracker.output(self->idStr_,
folly::stringPrintf("Total send logs: %ld", lastLogId - prevLogId + 1));
}
self->processAppendLogResponses(*result,
eb,
std::move(it),
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId,
std::move(pHosts));
return *result;
});
}
void RaftPart::processAppendLogResponses(const AppendLogResponses& resps,
folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId,
std::vector<std::shared_ptr<Host>> hosts) {
// Make sure majority have succeeded
size_t numSucceeded = 0;
TermID highestTerm = currTerm;
for (auto& res : resps) {
if (!hosts[res.first]->isLearner() &&
res.second.get_error_code() == nebula::cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
}
highestTerm = std::max(highestTerm, res.second.get_current_term());
}
nebula::cpp2::ErrorCode res = nebula::cpp2::ErrorCode::SUCCEEDED;
{
std::lock_guard<std::mutex> g(raftLock_);
if (highestTerm > term_) {
term_ = highestTerm;
role_ = Role::FOLLOWER;
leader_ = HostAddr("", 0);
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
res = nebula::cpp2::ErrorCode::E_RAFT_TERM_OUT_OF_DATE;
}
}
if (!checkAppendLogResult(res)) {
return;
}
if (numSucceeded >= quorum_) {
// Majority have succeeded
VLOG(2) << idStr_ << numSucceeded << " hosts have accepted the logs";
LogID firstLogId = 0;
do {
std::lock_guard<std::mutex> g(raftLock_);
res = canAppendLogs(currTerm);
if (res != nebula::cpp2::ErrorCode::SUCCEEDED) {
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
break;
}
lastLogId_ = lastLogId;
lastLogTerm_ = currTerm;
} while (false);
if (!checkAppendLogResult(res)) {
LOG(WARNING) << idStr_
<< "processAppendLogResponses failed because of not leader "
"or term changed";
return;
}
{
auto walIt = wal_->iterator(committedId + 1, lastLogId);
SlowOpTracker tracker;
// Step 3: Commit the batch
auto [code, lastCommitId, lastCommitTerm] = commitLogs(std::move(walIt), true);
if (code == nebula::cpp2::ErrorCode::SUCCEEDED) {
stats::StatsManager::addValue(kCommitLogLatencyUs, execTime_);
std::lock_guard<std::mutex> g(raftLock_);
CHECK_EQ(lastLogId, lastCommitId);
committedLogId_ = lastCommitId;
committedLogTerm_ = lastCommitTerm;
firstLogId = lastLogId_ + 1;
lastMsgAcceptedCostMs_ = lastMsgSentDur_.elapsedInMSec();
lastMsgAcceptedTime_ = time::WallClock::fastNowInMilliSec();
if (!commitInThisTerm_) {
commitInThisTerm_ = true;
bgWorkers_->addTask(
[self = shared_from_this(), term = term_] { self->onLeaderReady(term); });
}
} else {
LOG(FATAL) << idStr_ << "Failed to commit logs";
}
if (tracker.slow()) {
tracker.output(idStr_,
folly::stringPrintf("Total commit: %ld", committedLogId_ - committedId));
}
VLOG(2) << idStr_ << "Leader succeeded in committing the logs " << committedId + 1 << " to "
<< lastLogId;
}
// Step 4: Fulfill the promise
if (iter.hasNonAtomicOpLogs()) {
sendingPromise_.setOneSharedValue(nebula::cpp2::ErrorCode::SUCCEEDED);
}
if (iter.leadByAtomicOp()) {
sendingPromise_.setOneSingleValue(nebula::cpp2::ErrorCode::SUCCEEDED);
}
// Step 5: Check whether need to continue
// the log replication
{
std::lock_guard<std::mutex> lck(logsLock_);
CHECK(replicatingLogs_);
// Continue to process the original AppendLogsIterator if necessary
iter.resume();
      // If there are no more valid logs to be replicated in iter, create a new
      // one if we have new logs
if (iter.empty()) {
VLOG(2) << idStr_ << "logs size " << logs_.size();
if (logs_.size() > 0) {
// continue to replicate the logs
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
iter = AppendLogsIterator(firstLogId,
currTerm,
std::move(logs_),
[this](AtomicOp op) -> folly::Optional<std::string> {
auto opRet = op();
if (!opRet.hasValue()) {
// Failed
sendingPromise_.setOneSingleValue(
nebula::cpp2::ErrorCode::E_RAFT_ATOMIC_OP_FAILED);
}
return opRet;
});
logs_.clear();
bufferOverFlow_ = false;
}
      // Reset replicatingLogs_ if one of the following is true:
      // 1. the old iter is empty && logs_.size() == 0
      // 2. the old iter is empty && logs_.size() > 0, but all logs in the new
      //    iter are atomic ops, and all of them failed, which would make the
      //    iter empty again
if (iter.empty()) {
replicatingLogs_ = false;
VLOG(2) << idStr_ << "No more log to be replicated";
return;
}
}
}
this->appendLogsInternal(std::move(iter), currTerm);
} else {
// Not enough hosts accepted the log, re-try
LOG_EVERY_N(WARNING, 100) << idStr_ << "Only " << numSucceeded
<< " hosts succeeded, Need to try again";
usleep(1000);
replicateLogs(eb, std::move(iter), currTerm, lastLogId, committedId, prevLogTerm, prevLogId);
}
}
bool RaftPart::needToSendHeartbeat() {
std::lock_guard<std::mutex> g(raftLock_);
return status_ == Status::RUNNING && role_ == Role::LEADER;
}
bool RaftPart::needToStartElection() {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING && role_ == Role::FOLLOWER &&
(lastMsgRecvDur_.elapsedInMSec() >= FLAGS_raft_heartbeat_interval_secs * 1000 ||
isBlindFollower_)) {
LOG(INFO) << idStr_ << "Start leader election, reason: lastMsgDur "
<< lastMsgRecvDur_.elapsedInMSec() << ", term " << term_;
role_ = Role::CANDIDATE;
leader_ = HostAddr("", 0);
}
return role_ == Role::CANDIDATE;
}
bool RaftPart::prepareElectionRequest(cpp2::AskForVoteRequest& req,
std::vector<std::shared_ptr<Host>>& hosts,
bool isPreVote) {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (status_ != Status::RUNNING) {
VLOG(2) << idStr_ << "The partition is not running";
return false;
}
// Make sure the role is still CANDIDATE
if (role_ != Role::CANDIDATE) {
VLOG(2) << idStr_ << "A leader has been elected";
return false;
}
req.space_ref() = spaceId_;
req.part_ref() = partId_;
req.candidate_addr_ref() = addr_.host;
req.candidate_port_ref() = addr_.port;
req.is_pre_vote_ref() = isPreVote;
// Use term_ + 1 to check if peers would vote for me in prevote.
  // Only increase the term when the prevote succeeded.
if (isPreVote) {
req.term_ref() = term_ + 1;
} else {
req.term_ref() = ++term_;
// vote for myself
votedAddr_ = addr_;
votedTerm_ = term_;
}
req.last_log_id_ref() = lastLogId_;
req.last_log_term_ref() = lastLogTerm_;
hosts = followers();
return true;
}
void RaftPart::getState(cpp2::GetStateResponse& resp) {
std::lock_guard<std::mutex> g(raftLock_);
resp.term_ref() = term_;
resp.role_ref() = role_;
resp.is_leader_ref() = role_ == Role::LEADER;
resp.error_code_ref() = nebula::cpp2::ErrorCode::SUCCEEDED;
resp.committed_log_id_ref() = committedLogId_;
resp.last_log_id_ref() = lastLogId_;
resp.last_log_term_ref() = lastLogTerm_;
resp.status_ref() = status_;
}
bool RaftPart::processElectionResponses(const RaftPart::ElectionResponses& results,
std::vector<std::shared_ptr<Host>> hosts,
TermID proposedTerm,
bool isPreVote) {
std::lock_guard<std::mutex> g(raftLock_);
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(INFO) << idStr_ << "The part has been stopped, skip the request";
return false;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(INFO) << idStr_ << "The partition is still starting";
return false;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
LOG(INFO) << idStr_ << "The partition is still waiting snapshot";
return false;
}
if (role_ != Role::CANDIDATE) {
LOG(INFO) << idStr_ << "Partition's role has changed to " << roleStr(role_)
<< " during the election, so discard the results";
return false;
}
CHECK(role_ == Role::CANDIDATE);
// term changed during actual leader election
if (!isPreVote && proposedTerm != term_) {
LOG(INFO) << idStr_ << "Partition's term has changed during election, "
<< "so just ignore the respsonses, "
<< "expected " << proposedTerm << ", actual " << term_;
return false;
}
size_t numSucceeded = 0;
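  // In a prevote we proposed term_ + 1 without actually bumping term_, so our real term is
  // proposedTerm - 1; use it as the baseline when scanning the responses for a higher term.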
TermID highestTerm = isPreVote ? proposedTerm - 1 : proposedTerm;
for (auto& r : results) {
if (r.second.get_error_code() == nebula::cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
} else {
LOG(WARNING) << idStr_ << "Receive response about askForVote from "
<< hosts[r.first]->address() << ", error code is "
<< apache::thrift::util::enumNameSafe(r.second.get_error_code())
<< ", isPreVote = " << isPreVote;
}
highestTerm = std::max(highestTerm, r.second.get_current_term());
}
if (highestTerm > term_) {
term_ = highestTerm;
role_ = Role::FOLLOWER;
leader_ = HostAddr("", 0);
return false;
}
if (numSucceeded >= quorum_) {
if (isPreVote) {
LOG(INFO) << idStr_ << "Partition win prevote of term " << proposedTerm;
} else {
LOG(INFO) << idStr_ << "Partition is elected as the new leader for term " << proposedTerm;
term_ = proposedTerm;
role_ = Role::LEADER;
leader_ = addr_;
isBlindFollower_ = false;
}
return true;
}
LOG(INFO) << idStr_ << "Did not get enough votes from election of term " << proposedTerm
<< ", isPreVote = " << isPreVote;
return false;
}
folly::Future<bool> RaftPart::leaderElection(bool isPreVote) {
VLOG(2) << idStr_ << "Start leader election...";
using namespace folly; // NOLINT since the fancy overload of | operator
bool expected = false;
if (!inElection_.compare_exchange_strong(expected, true)) {
return false;
}
cpp2::AskForVoteRequest voteReq;
decltype(hosts_) hosts;
if (!prepareElectionRequest(voteReq, hosts, isPreVote)) {
    // Suppose we have three replicas A (leader), B and C. After A crashes,
    // B and C begin the election. B wins and sends heartbeats; C has a log gap
    // with B and needs a snapshot from B. Meanwhile C begins the election and
    // becomes a Candidate, but because C is in WAITING_SNAPSHOT,
    // prepareElectionRequest returns false and the election goes on.
    // Since C remains a Candidate, it rejects the snapshot request from B,
    // and an infinite loop begins.
    // So we need to go back to the follower state to avoid this case.
std::lock_guard<std::mutex> g(raftLock_);
role_ = Role::FOLLOWER;
leader_ = HostAddr("", 0);
inElection_ = false;
return false;
}
// Send out the AskForVoteRequest
LOG(INFO) << idStr_ << "Sending out an election request "
<< "(space = " << voteReq.get_space() << ", part = " << voteReq.get_part()
<< ", term = " << voteReq.get_term() << ", lastLogId = " << voteReq.get_last_log_id()
<< ", lastLogTerm = " << voteReq.get_last_log_term()
<< ", candidateIP = " << voteReq.get_candidate_addr()
<< ", candidatePort = " << voteReq.get_candidate_port() << ")"
<< ", isPreVote = " << isPreVote;
auto proposedTerm = voteReq.get_term();
auto resps = ElectionResponses();
if (hosts.empty()) {
auto ret = handleElectionResponses(resps, hosts, proposedTerm, isPreVote);
inElection_ = false;
return ret;
} else {
folly::Promise<bool> promise;
auto future = promise.getFuture();
auto eb = ioThreadPool_->getEventBase();
collectNSucceeded(
gen::from(hosts) |
gen::map([eb, self = shared_from_this(), voteReq](std::shared_ptr<Host> host) {
VLOG(2) << self->idStr_ << "Sending AskForVoteRequest to " << host->idStr();
return via(eb, [voteReq, host, eb]() -> Future<cpp2::AskForVoteResponse> {
return host->askForVote(voteReq, eb);
});
}) |
gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts](size_t idx, cpp2::AskForVoteResponse& resp) {
return resp.get_error_code() == nebula::cpp2::ErrorCode::SUCCEEDED &&
!hosts[idx]->isLearner();
})
.via(executor_.get())
.then([self = shared_from_this(), pro = std::move(promise), hosts, proposedTerm, isPreVote](
auto&& t) mutable {
VLOG(2) << self->idStr_
<< "AskForVoteRequest has been sent to all peers, waiting for responses";
CHECK(!t.hasException());
pro.setValue(
self->handleElectionResponses(t.value(), std::move(hosts), proposedTerm, isPreVote));
});
return future;
}
}
bool RaftPart::handleElectionResponses(const ElectionResponses& resps,
const std::vector<std::shared_ptr<Host>>& peers,
TermID proposedTerm,
bool isPreVote) {
// Process the responses
auto elected = processElectionResponses(resps, std::move(peers), proposedTerm, isPreVote);
if (!isPreVote && elected) {
std::vector<std::shared_ptr<Host>> hosts;
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING) {
leader_ = addr_;
hosts = hosts_;
bgWorkers_->addTask(
[self = shared_from_this(), proposedTerm] { self->onElected(proposedTerm); });
lastMsgAcceptedTime_ = 0;
}
commitInThisTerm_ = false;
}
// reset host can't be executed with raftLock_, otherwise it may encounter deadlock
for (auto& host : hosts) {
host->reset();
host->resume();
}
sendHeartbeat();
}
inElection_ = false;
return elected;
}
void RaftPart::statusPolling(int64_t startTime) {
{
std::lock_guard<std::mutex> g(raftLock_);
    // If startTime is not the same as the time when `statusPolling` was added
    // to the event loop, it means the part has been restarted (it only happens
    // in ut for now), so don't add another `statusPolling`.
if (startTime != startTimeMs_) {
return;
}
}
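  // Poll roughly three times per heartbeat interval, plus up to 500ms of random jitter so
  // that parts do not all wake up at the same moment.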
size_t delay = FLAGS_raft_heartbeat_interval_secs * 1000 / 3 + folly::Random::rand32(500);
if (needToStartElection()) {
if (leaderElection(true).get() && leaderElection(false).get()) {
// elected as leader
} else {
// No leader has been elected, need to continue
// (After sleeping a random period between [500ms, 2s])
VLOG(2) << idStr_ << "Wait for a while and continue the leader election";
delay = (folly::Random::rand32(1500) + 500);
}
} else if (needToSendHeartbeat()) {
VLOG(2) << idStr_ << "Need to send heartbeat";
sendHeartbeat();
}
if (needToCleanupSnapshot()) {
cleanupSnapshot();
}
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING || status_ == Status::WAITING_SNAPSHOT) {
VLOG(3) << idStr_ << "Schedule new task";
bgWorkers_->addDelayTask(
delay, [self = shared_from_this(), startTime] { self->statusPolling(startTime); });
}
}
}
bool RaftPart::needToCleanupSnapshot() {
std::lock_guard<std::mutex> g(raftLock_);
return status_ == Status::WAITING_SNAPSHOT && role_ != Role::LEADER &&
lastSnapshotRecvDur_.elapsedInSec() >= FLAGS_raft_snapshot_timeout;
}
void RaftPart::cleanupSnapshot() {
LOG(INFO) << idStr_ << "Clean up the snapshot";
std::lock_guard<std::mutex> g(raftLock_);
reset();
status_ = Status::RUNNING;
}
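// Returns false while the part is starting or waiting for a snapshot, or while any peer is
// still being sent a snapshot, so that the WAL needed to catch that peer up is kept.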
bool RaftPart::needToCleanWal() {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::STARTING || status_ == Status::WAITING_SNAPSHOT) {
return false;
}
for (auto& host : hosts_) {
if (host->sendingSnapshot_) {
return false;
}
}
return true;
}
void RaftPart::processAskForVoteRequest(const cpp2::AskForVoteRequest& req,
cpp2::AskForVoteResponse& resp) {
LOG(INFO) << idStr_ << "Received a VOTING request"
<< ": space = " << req.get_space() << ", partition = " << req.get_part()
<< ", candidateAddr = " << req.get_candidate_addr() << ":" << req.get_candidate_port()
<< ", term = " << req.get_term() << ", lastLogId = " << req.get_last_log_id()
<< ", lastLogTerm = " << req.get_last_log_term()
<< ", isPreVote = " << req.get_is_pre_vote();
std::lock_guard<std::mutex> g(raftLock_);
resp.current_term_ref() = term_;
// Make sure the partition is running
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(INFO) << idStr_ << "The part has been stopped, skip the request";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_STOPPED;
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(INFO) << idStr_ << "The partition is still starting";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_NOT_READY;
return;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
LOG(INFO) << idStr_ << "The partition is still waiting snapshot";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_WAITING_SNAPSHOT;
return;
}
LOG(INFO) << idStr_ << "The partition currently is a " << roleStr(role_) << ", lastLogId "
<< lastLogId_ << ", lastLogTerm " << lastLogTerm_ << ", committedLogId "
<< committedLogId_ << ", term " << term_;
if (role_ == Role::LEARNER) {
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_BAD_ROLE;
return;
}
auto candidate = HostAddr(req.get_candidate_addr(), req.get_candidate_port());
auto code = checkPeer(candidate);
if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
resp.error_code_ref() = code;
return;
}
// Check term id
if (req.get_term() < term_) {
LOG(INFO) << idStr_ << "The partition currently is on term " << term_
<< ", the term proposed by the candidate is " << req.get_term()
<< ", so it will be rejected";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_TERM_OUT_OF_DATE;
return;
}
auto oldRole = role_;
auto oldTerm = term_;
if (!req.get_is_pre_vote() && req.get_term() > term_) {
// req.get_term() > term_ in formal election, update term and convert to follower
term_ = req.get_term();
role_ = Role::FOLLOWER;
leader_ = HostAddr("", 0);
resp.current_term_ref() = term_;
} else if (req.get_is_pre_vote() && req.get_term() - 1 > term_) {
    // req.get_term() - 1 > term_ in prevote, update term and convert to follower.
    // we need to subtract 1 because the candidate's actual term is req.term() - 1
term_ = req.get_term() - 1;
role_ = Role::FOLLOWER;
leader_ = HostAddr("", 0);
resp.current_term_ref() = term_;
}
// Check the last term to receive a log
if (req.get_last_log_term() < lastLogTerm_) {
LOG(INFO) << idStr_ << "The partition's last term to receive a log is " << lastLogTerm_
<< ", which is newer than the candidate's log " << req.get_last_log_term()
<< ". So the candidate will be rejected";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_TERM_OUT_OF_DATE;
return;
}
if (req.get_last_log_term() == lastLogTerm_) {
// Check last log id
if (req.get_last_log_id() < lastLogId_) {
LOG(INFO) << idStr_ << "The partition's last log id is " << lastLogId_
<< ". The candidate's last log id " << req.get_last_log_id()
<< " is smaller, so it will be rejected, candidate is " << candidate;
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_LOG_STALE;
return;
}
}
/*
   check if we have voted for someone in the candidate's proposed term
   1. if this is a prevote:
      * not enough votes: the candidate will trigger another round of election
      * majority votes: the candidate will start the formal election (I'll reject the formal one as well)
   2. if this is a formal election:
      * not enough votes: the candidate will trigger another round of election
* majority votes: the candidate will be leader
*/
if (votedTerm_ == req.get_term() && votedAddr_ != candidate) {
LOG(INFO) << idStr_ << "We have voted " << votedAddr_ << " on term " << votedTerm_
<< ", so we should reject the candidate " << candidate << " request on term "
<< req.get_term();
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_TERM_OUT_OF_DATE;
return;
}
// Ok, no reason to refuse, we will vote for the candidate
LOG(INFO) << idStr_ << "The partition will vote for the candidate " << candidate
<< ", isPreVote = " << req.get_is_pre_vote();
if (req.get_is_pre_vote()) {
// return succeed if it is prevote, do not change any state
resp.error_code_ref() = nebula::cpp2::ErrorCode::SUCCEEDED;
return;
}
  // not a pre-vote, need to roll back the wal if necessary
  // role_ and term_ have been set above
if (oldRole == Role::LEADER) {
if (wal_->lastLogId() > lastLogId_) {
LOG(INFO) << idStr_ << "There are some logs up to " << wal_->lastLogId()
<< " update lastLogId_ " << lastLogId_ << " to wal's";
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
}
for (auto& host : hosts_) {
host->pause();
}
}
if (oldRole == Role::LEADER) {
bgWorkers_->addTask([self = shared_from_this(), oldTerm] { self->onLostLeadership(oldTerm); });
}
  // not a pre-vote, req.term() >= term_, all checks passed, convert to follower
term_ = req.get_term();
role_ = Role::FOLLOWER;
leader_ = HostAddr("", 0);
votedAddr_ = candidate;
votedTerm_ = req.get_term();
resp.error_code_ref() = nebula::cpp2::ErrorCode::SUCCEEDED;
resp.current_term_ref() = term_;
// Reset the last message time
lastMsgRecvDur_.reset();
isBlindFollower_ = false;
stats::StatsManager::addValue(kNumRaftVotes);
return;
}
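// Handles an AppendLog request from the leader. Overall flow: verify that the sender is a
// legal leader, locate the last point where the local wal matches the leader's logs
// (truncating any conflicting suffix), append the remaining entries, and finally advance the
// local commit point up to min(lastMatchedLogId, leader's committed_log_id).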
void RaftPart::processAppendLogRequest(const cpp2::AppendLogRequest& req,
cpp2::AppendLogResponse& resp) {
LOG_IF(INFO, FLAGS_trace_raft) << idStr_ << "Received logAppend"
<< ": GraphSpaceId = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", leaderIp = " << req.get_leader_addr()
<< ", leaderPort = " << req.get_leader_port()
<< ", current_term = " << req.get_current_term()
<< ", committedLogId = " << req.get_committed_log_id()
<< ", lastLogIdSent = " << req.get_last_log_id_sent()
<< ", lastLogTermSent = " << req.get_last_log_term_sent()
<< ", num_logs = " << req.get_log_str_list().size()
<< ", local lastLogId = " << lastLogId_
<< ", local lastLogTerm = " << lastLogTerm_
<< ", local committedLogId = " << committedLogId_
<< ", local current term = " << term_
<< ", wal lastLogId = " << wal_->lastLogId();
std::lock_guard<std::mutex> g(raftLock_);
resp.current_term_ref() = term_;
resp.leader_addr_ref() = leader_.host;
resp.leader_port_ref() = leader_.port;
resp.committed_log_id_ref() = committedLogId_;
  // by default we ask the leader to send logs after committedLogId_
resp.last_matched_log_id_ref() = committedLogId_;
resp.last_matched_log_term_ref() = committedLogTerm_;
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
VLOG(2) << idStr_ << "The part has been stopped, skip the request";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_STOPPED;
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
VLOG(2) << idStr_ << "The partition is still starting";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_NOT_READY;
return;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
VLOG(2) << idStr_ << "The partition is waiting for snapshot";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_WAITING_SNAPSHOT;
return;
}
// Check leadership
nebula::cpp2::ErrorCode err = verifyLeader<cpp2::AppendLogRequest>(req);
// Set term_ again because it may be modified in verifyLeader
resp.current_term_ref() = term_;
if (err != nebula::cpp2::ErrorCode::SUCCEEDED) {
// Wrong leadership
VLOG(2) << idStr_ << "Will not follow the leader";
resp.error_code_ref() = err;
return;
}
// Reset the timeout timer
lastMsgRecvDur_.reset();
  // `lastMatchedLogId` is the last log id at which the leader's and the follower's logs match
  // (which means the log terms of the same log id are the same)
// The relationships are as follows:
// myself.committedLogId_ <= lastMatchedLogId <= lastLogId_
LogID lastMatchedLogId = committedLogId_;
do {
size_t diffIndex = 0;
size_t numLogs = req.get_log_str_list().size();
LogID firstId = req.get_last_log_id_sent() + 1;
LogID lastId = req.get_last_log_id_sent() + numLogs;
if (req.get_last_log_id_sent() == lastLogId_ && req.get_last_log_term_sent() == lastLogTerm_) {
// happy path: logs are matched, just append log
} else {
      // We ask the leader to send logs from committedLogId_ if one of the following occurs:
      // 1. Some log entries in the current request have already been committed
      // 2. I don't have the log of req.last_log_id_sent
      // 3. My log term at req.last_log_id_sent is not the same as req.last_log_term_sent
      // todo(doodle): One of the most common cases of req.get_last_log_id_sent() < committedLogId_
      // is that the leader timed out and retried with the same request, while the follower had in
      // fact received it previously. There are two choices: ask the leader to send logs after
      // committedLogId_, or just do nothing.
if (req.get_last_log_id_sent() < committedLogId_ ||
wal_->lastLogId() < req.get_last_log_id_sent() ||
wal_->getLogTerm(req.get_last_log_id_sent()) != req.get_last_log_term_sent()) {
resp.last_matched_log_id_ref() = committedLogId_;
        resp.last_matched_log_term_ref() = committedLogTerm_;
        resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_LOG_GAP;
// lastMatchedLogId is committedLogId_
return;
}
// wal_->logTerm(req.get_last_log_id_sent()) == req.get_last_log_term()
// Try to find the diff point by comparing each log entry's term of same id between local wal
// and log entry in request
TermID lastTerm = (numLogs == 0) ? req.get_last_log_term_sent()
: req.get_log_str_list().back().get_log_term();
auto localWalIt = wal_->iterator(firstId, lastId);
for (size_t i = 0; i < numLogs && localWalIt->valid(); ++i, ++(*localWalIt), ++diffIndex) {
if (localWalIt->logTerm() != req.get_log_str_list()[i].get_log_term()) {
break;
}
}
if (diffIndex == numLogs) {
// all logs are the same, ask leader to send logs after lastId
lastMatchedLogId = lastId;
resp.last_matched_log_id_ref() = lastId;
resp.last_matched_log_term_ref() = lastTerm;
break;
}
// Found a difference at log of (firstId + diffIndex), all logs from (firstId + diffIndex)
// could be truncated
wal_->rollbackToLog(firstId + diffIndex - 1);
firstId = firstId + diffIndex;
numLogs = numLogs - diffIndex;
}
    // happy path or a difference is found: append the remaining logs
auto logEntries = std::vector<cpp2::RaftLogEntry>(
std::make_move_iterator(req.get_log_str_list().begin() + diffIndex),
std::make_move_iterator(req.get_log_str_list().end()));
RaftLogIterator logIter(firstId, std::move(logEntries));
if (wal_->appendLogs(logIter)) {
CHECK_EQ(lastId, wal_->lastLogId());
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
lastMatchedLogId = lastLogId_;
resp.last_matched_log_id_ref() = lastLogId_;
resp.last_matched_log_term_ref() = lastLogTerm_;
} else {
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_WAL_FAIL;
return;
}
} while (false);
// If follower found a point where log matches leader's log (lastMatchedLogId), if leader's
// committed_log_id is greater than lastMatchedLogId, we can commit logs before lastMatchedLogId
LogID lastLogIdCanCommit = std::min(lastMatchedLogId, req.get_committed_log_id());
CHECK_LE(lastLogIdCanCommit, wal_->lastLogId());
if (lastLogIdCanCommit > committedLogId_) {
auto walIt = wal_->iterator(committedLogId_ + 1, lastLogIdCanCommit);
auto [code, lastCommitId, lastCommitTerm] = commitLogs(std::move(walIt), false);
if (code == nebula::cpp2::ErrorCode::SUCCEEDED) {
stats::StatsManager::addValue(kCommitLogLatencyUs, execTime_);
VLOG(1) << idStr_ << "Follower succeeded committing log " << committedLogId_ + 1 << " to "
<< lastLogIdCanCommit;
CHECK_EQ(lastLogIdCanCommit, lastCommitId);
committedLogId_ = lastCommitId;
committedLogTerm_ = lastCommitTerm;
resp.committed_log_id_ref() = lastLogIdCanCommit;
resp.error_code_ref() = nebula::cpp2::ErrorCode::SUCCEEDED;
} else if (code == nebula::cpp2::ErrorCode::E_WRITE_STALLED) {
VLOG(1) << idStr_ << "Follower delay committing log " << committedLogId_ + 1 << " to "
<< lastLogIdCanCommit;
// Even if log is not applied to state machine, still regard as succeeded:
// 1. As a follower, upcoming request will try to commit them
// 2. If it is elected as leader later, it will try to commit them as well
resp.committed_log_id_ref() = committedLogId_;
resp.error_code_ref() = nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
LOG(ERROR) << idStr_ << "Failed to commit log " << committedLogId_ + 1 << " to "
<< req.get_committed_log_id();
resp.committed_log_id_ref() = committedLogId_;
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_WAL_FAIL;
}
} else {
resp.error_code_ref() = nebula::cpp2::ErrorCode::SUCCEEDED;
}
// Reset the timeout timer again in case wal and commit takes longer time than
// expected
lastMsgRecvDur_.reset();
}
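// Checks whether the sender of an AppendLog / Heartbeat request is a legal leader. On
// success, term_ / role_ / leader_ may be updated (stepping down if we were the leader), and
// the onLostLeadership / onDiscoverNewLeader callbacks are scheduled as needed.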
template <typename REQ>
nebula::cpp2::ErrorCode RaftPart::verifyLeader(const REQ& req) {
DCHECK(!raftLock_.try_lock());
auto peer = HostAddr(req.get_leader_addr(), req.get_leader_port());
auto code = checkPeer(peer);
if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
return code;
}
VLOG(2) << idStr_ << "The current role is " << roleStr(role_);
// Make sure the remote term is greater than local's
if (req.get_current_term() < term_) {
LOG_EVERY_N(INFO, 100) << idStr_ << "The current role is " << roleStr(role_)
<< ". The local term is " << term_ << ". The remote term is not newer";
return nebula::cpp2::ErrorCode::E_RAFT_TERM_OUT_OF_DATE;
} else if (req.get_current_term() > term_) {
// found new leader with higher term
} else {
// req.get_current_term() == term_
if (UNLIKELY(leader_ == HostAddr("", 0))) {
// I don't know who is the leader, will accept it as new leader
} else {
// I know who is leader
if (LIKELY(leader_ == peer)) {
// Same leader
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
LOG(ERROR) << idStr_ << "Split brain happens, will follow the new leader " << peer
<< " on term " << req.get_current_term();
}
}
}
// Update my state.
Role oldRole = role_;
TermID oldTerm = term_;
// Ok, no reason to refuse, just follow the leader
LOG(INFO) << idStr_ << "The current role is " << roleStr(role_) << ". Will follow the new leader "
<< peer << " on term " << req.get_current_term();
if (role_ != Role::LEARNER) {
role_ = Role::FOLLOWER;
}
leader_ = peer;
term_ = req.get_current_term();
isBlindFollower_ = false;
// Before accept the logs from the new leader, check the logs locally.
if (oldRole == Role::LEADER) {
if (wal_->lastLogId() > lastLogId_) {
LOG(INFO) << idStr_ << "There are some logs up to " << wal_->lastLogId()
<< " update lastLogId_ " << lastLogId_ << " to wal's";
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
}
for (auto& host : hosts_) {
host->pause();
}
}
if (oldRole == Role::LEADER) {
// Need to invoke onLostLeadership callback
bgWorkers_->addTask([self = shared_from_this(), oldTerm] { self->onLostLeadership(oldTerm); });
}
bgWorkers_->addTask([self = shared_from_this()] { self->onDiscoverNewLeader(self->leader_); });
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
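// Handle a heartbeat from the leader: verify its leadership, refresh the election
// timeout, and report the local log and commit state back in the response.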
void RaftPart::processHeartbeatRequest(const cpp2::HeartbeatRequest& req,
cpp2::HeartbeatResponse& resp) {
LOG_IF(INFO, FLAGS_trace_raft) << idStr_ << "Received heartbeat"
<< ": GraphSpaceId = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", leaderIp = " << req.get_leader_addr()
<< ", leaderPort = " << req.get_leader_port()
<< ", current_term = " << req.get_current_term()
<< ", committedLogId = " << req.get_committed_log_id()
<< ", lastLogIdSent = " << req.get_last_log_id_sent()
<< ", lastLogTermSent = " << req.get_last_log_term_sent()
<< ", local lastLogId = " << lastLogId_
<< ", local lastLogTerm = " << lastLogTerm_
<< ", local committedLogId = " << committedLogId_
<< ", local current term = " << term_;
std::lock_guard<std::mutex> g(raftLock_);
  // As for heartbeat, last_log_id and last_log_term are not checked by the leader; the
  // follower only verifies whether the leader is legal, and just returns lastLogId_ and
  // lastLogTerm_ in resp. We don't do any log appending.
  // The leader will check current_term in resp: if req.term < term_ (the follower rejects
  // the request in verifyLeader), the leader will update its term and step down as follower.
resp.current_term_ref() = term_;
resp.leader_addr_ref() = leader_.host;
resp.leader_port_ref() = leader_.port;
resp.committed_log_id_ref() = committedLogId_;
resp.last_log_id_ref() = lastLogId_;
resp.last_log_term_ref() = lastLogTerm_;
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
VLOG(2) << idStr_ << "The part has been stopped, skip the request";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_STOPPED;
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
VLOG(2) << idStr_ << "The partition is still starting";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_NOT_READY;
return;
}
// Check leadership
nebula::cpp2::ErrorCode err = verifyLeader<cpp2::HeartbeatRequest>(req);
// Set term_ again because it may be modified in verifyLeader
resp.current_term_ref() = term_;
if (err != nebula::cpp2::ErrorCode::SUCCEEDED) {
// Wrong leadership
VLOG(2) << idStr_ << "Will not follow the leader";
resp.error_code_ref() = err;
return;
}
// Reset the timeout timer
lastMsgRecvDur_.reset();
// As for heartbeat, return ok after verifyLeader
resp.error_code_ref() = nebula::cpp2::ErrorCode::SUCCEEDED;
return;
}
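// Apply a batch of snapshot rows sent by the leader. The part stays in WAITING_SNAPSHOT
// until the final batch (req.done) arrives, then switches back to RUNNING.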
void RaftPart::processSendSnapshotRequest(const cpp2::SendSnapshotRequest& req,
cpp2::SendSnapshotResponse& resp) {
VLOG(1) << idStr_ << "Receive snapshot, total rows " << req.get_rows().size()
<< ", total count received " << req.get_total_count() << ", total size received "
<< req.get_total_size() << ", finished " << req.get_done();
std::lock_guard<std::mutex> g(raftLock_);
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(ERROR) << idStr_ << "The part has been stopped, skip the request";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_STOPPED;
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(ERROR) << idStr_ << "The partition is still starting";
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_NOT_READY;
return;
}
if (UNLIKELY(role_ != Role::FOLLOWER && role_ != Role::LEARNER)) {
LOG(ERROR) << idStr_ << "Bad role " << roleStr(role_);
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_STOPPED;
return;
}
if (UNLIKELY(leader_ != HostAddr(req.get_leader_addr(), req.get_leader_port()) ||
term_ != req.get_term())) {
LOG(ERROR) << idStr_ << "Term out of date, current term " << term_ << ", received term "
<< req.get_term();
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_TERM_OUT_OF_DATE;
return;
}
if (status_ != Status::WAITING_SNAPSHOT) {
LOG(INFO) << idStr_ << "Begin to receive the snapshot";
reset();
status_ = Status::WAITING_SNAPSHOT;
}
lastSnapshotRecvDur_.reset();
// TODO(heng): Maybe we should save them into one sst firstly?
auto ret = commitSnapshot(
req.get_rows(), req.get_committed_log_id(), req.get_committed_log_term(), req.get_done());
stats::StatsManager::addValue(kCommitSnapshotLatencyUs, execTime_);
lastTotalCount_ += ret.first;
lastTotalSize_ += ret.second;
if (lastTotalCount_ != req.get_total_count() || lastTotalSize_ != req.get_total_size()) {
LOG(ERROR) << idStr_ << "Bad snapshot, total rows received " << lastTotalCount_
<< ", total rows sended " << req.get_total_count() << ", total size received "
<< lastTotalSize_ << ", total size sended " << req.get_total_size();
resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_PERSIST_SNAPSHOT_FAILED;
return;
}
if (req.get_done()) {
committedLogId_ = req.get_committed_log_id();
committedLogTerm_ = req.get_committed_log_term();
lastLogId_ = committedLogId_;
lastLogTerm_ = committedLogTerm_;
term_ = lastLogTerm_;
// there should be no wal after state converts to WAITING_SNAPSHOT, the RaftPart has been reset
DCHECK_EQ(wal_->firstLogId(), 0);
DCHECK_EQ(wal_->lastLogId(), 0);
status_ = Status::RUNNING;
LOG(INFO) << idStr_ << "Receive all snapshot, committedLogId_ " << committedLogId_
<< ", committedLogTerm_ " << committedLogTerm_ << ", lastLodId " << lastLogId_
<< ", lastLogTermId " << lastLogTerm_;
}
resp.error_code_ref() = nebula::cpp2::ErrorCode::SUCCEEDED;
return;
}
void RaftPart::sendHeartbeat() {
  // If the leader has not committed any logs in this term, it must commit all logs from
  // the previous term, so the heartbeat is sent by appending one empty log.
if (!replicatingLogs_.load(std::memory_order_acquire)) {
folly::via(executor_.get(), [this] {
std::string log = "";
appendLogAsync(clusterId_, LogType::NORMAL, std::move(log));
});
}
using namespace folly; // NOLINT since the fancy overload of | operator
VLOG(2) << idStr_ << "Send heartbeat";
TermID currTerm = 0;
LogID commitLogId = 0;
TermID prevLogTerm = 0;
LogID prevLogId = 0;
size_t replica = 0;
decltype(hosts_) hosts;
{
std::lock_guard<std::mutex> g(raftLock_);
currTerm = term_;
commitLogId = committedLogId_;
prevLogTerm = lastLogTerm_;
prevLogId = lastLogId_;
replica = quorum_;
hosts = hosts_;
}
auto eb = ioThreadPool_->getEventBase();
auto startMs = time::WallClock::fastNowInMilliSec();
collectNSucceeded(
gen::from(hosts) |
gen::map([self = shared_from_this(), eb, currTerm, commitLogId, prevLogId, prevLogTerm](
std::shared_ptr<Host> hostPtr) {
VLOG(2) << self->idStr_ << "Send heartbeat to " << hostPtr->idStr();
return via(eb, [=]() -> Future<cpp2::HeartbeatResponse> {
return hostPtr->sendHeartbeat(eb, currTerm, commitLogId, prevLogTerm, prevLogId);
});
}) |
gen::as<std::vector>(),
// Number of succeeded required
hosts.size(),
// Result evaluator
[hosts](size_t index, cpp2::HeartbeatResponse& resp) {
return resp.get_error_code() == nebula::cpp2::ErrorCode::SUCCEEDED &&
!hosts[index]->isLearner();
})
.then([replica, hosts = std::move(hosts), startMs, currTerm, this](
folly::Try<HeartbeatResponses>&& resps) {
CHECK(!resps.hasException());
size_t numSucceeded = 0;
TermID highestTerm = currTerm;
for (auto& resp : *resps) {
if (!hosts[resp.first]->isLearner() &&
resp.second.get_error_code() == nebula::cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
}
highestTerm = std::max(highestTerm, resp.second.get_current_term());
}
{
std::lock_guard<std::mutex> g(raftLock_);
if (highestTerm > term_) {
term_ = highestTerm;
role_ = Role::FOLLOWER;
leader_ = HostAddr("", 0);
return;
}
}
if (numSucceeded >= replica) {
VLOG(2) << idStr_ << "Heartbeat is accepted by quorum";
std::lock_guard<std::mutex> g(raftLock_);
auto now = time::WallClock::fastNowInMilliSec();
lastMsgAcceptedCostMs_ = now - startMs;
lastMsgAcceptedTime_ = now;
}
});
}
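// Return all hosts that are not learners. The caller must hold raftLock_.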
std::vector<std::shared_ptr<Host>> RaftPart::followers() const {
CHECK(!raftLock_.try_lock());
decltype(hosts_) hosts;
for (auto& h : hosts_) {
if (!h->isLearner()) {
hosts.emplace_back(h);
}
}
return hosts;
}
std::vector<HostAddr> RaftPart::peers() const {
std::lock_guard<std::mutex> lck(raftLock_);
std::vector<HostAddr> peer{addr_};
for (auto& host : hosts_) {
peer.emplace_back(host->address());
}
return peer;
}
std::set<HostAddr> RaftPart::listeners() const {
std::lock_guard<std::mutex> lck(raftLock_);
return listeners_;
}
std::pair<LogID, TermID> RaftPart::lastLogInfo() const {
return std::make_pair(wal_->lastLogId(), wal_->lastLogTerm());
}
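// On failure, drop all buffered logs and propagate the error code to every pending
// promise so that blocked callers are released. Returns whether res is SUCCEEDED.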
bool RaftPart::checkAppendLogResult(nebula::cpp2::ErrorCode res) {
if (res != nebula::cpp2::ErrorCode::SUCCEEDED) {
{
std::lock_guard<std::mutex> lck(logsLock_);
logs_.clear();
cachingPromise_.setValue(res);
cachingPromise_.reset();
bufferOverFlow_ = false;
sendingPromise_.setValue(res);
replicatingLogs_ = false;
}
return false;
}
return true;
}
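// Wipe the wal and all log/commit bookkeeping, e.g. before receiving a snapshot.
// The caller must hold raftLock_.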
void RaftPart::reset() {
CHECK(!raftLock_.try_lock());
wal_->reset();
cleanup();
lastLogId_ = committedLogId_ = 0;
lastLogTerm_ = committedLogTerm_ = 0;
lastTotalCount_ = 0;
lastTotalSize_ = 0;
}
nebula::cpp2::ErrorCode RaftPart::isCatchedUp(const HostAddr& peer) {
std::lock_guard<std::mutex> lck(raftLock_);
LOG(INFO) << idStr_ << "Check whether I catch up";
if (role_ != Role::LEADER) {
LOG(INFO) << idStr_ << "I am not the leader";
return nebula::cpp2::ErrorCode::E_LEADER_CHANGED;
}
if (peer == addr_) {
LOG(INFO) << idStr_ << "I am the leader";
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
for (auto& host : hosts_) {
if (host->addr_ == peer) {
if (host->followerCommittedLogId_ == 0 ||
host->followerCommittedLogId_ < wal_->firstLogId()) {
LOG(INFO) << idStr_ << "The committed log id of peer is " << host->followerCommittedLogId_
<< ", which is invalid or less than my first wal log id";
return nebula::cpp2::ErrorCode::E_RAFT_SENDING_SNAPSHOT;
}
return host->sendingSnapshot_ ? nebula::cpp2::ErrorCode::E_RAFT_SENDING_SNAPSHOT
: nebula::cpp2::ErrorCode::SUCCEEDED;
}
}
return nebula::cpp2::ErrorCode::E_RAFT_INVALID_PEER;
}
bool RaftPart::linkCurrentWAL(const char* newPath) {
CHECK_NOTNULL(newPath);
std::lock_guard<std::mutex> g(raftLock_);
return wal_->linkCurrentWAL(newPath);
}
void RaftPart::checkAndResetPeers(const std::vector<HostAddr>& peers) {
std::lock_guard<std::mutex> lck(raftLock_);
  // To avoid invalidating the iterator, we iterate over a copy of hosts_.
decltype(hosts_) hosts = hosts_;
for (auto& h : hosts) {
LOG(INFO) << idStr_ << "Check host " << h->addr_;
auto it = std::find(peers.begin(), peers.end(), h->addr_);
if (it == peers.end()) {
LOG(INFO) << idStr_ << "The peer " << h->addr_ << " should not exist in my peers";
removePeer(h->addr_);
}
}
for (auto& p : peers) {
LOG(INFO) << idStr_ << "Add peer " << p << " if not exist!";
addPeer(p);
}
}
void RaftPart::checkRemoteListeners(const std::set<HostAddr>& expected) {
auto actual = listeners();
for (const auto& host : actual) {
auto it = std::find(expected.begin(), expected.end(), host);
if (it == expected.end()) {
LOG(INFO) << idStr_ << "The listener " << host << " should not exist in my peers";
removeListenerPeer(host);
}
}
for (const auto& host : expected) {
auto it = std::find(actual.begin(), actual.end(), host);
if (it == actual.end()) {
LOG(INFO) << idStr_ << "Add listener " << host << " to my peers";
addListenerPeer(host);
}
}
}
bool RaftPart::leaseValid() {
std::lock_guard<std::mutex> g(raftLock_);
if (hosts_.empty()) {
return true;
}
if (!commitInThisTerm_) {
return false;
}
  // When a majority has accepted a log, the leader obtains a lease which lasts for one
  // heartbeat interval. However, we need to subtract the network IO time. The left side
  // of the inequality is the time elapsed since the leader last sent a log
  // (a log that has been accepted as well)
return time::WallClock::fastNowInMilliSec() - lastMsgAcceptedTime_ <
FLAGS_raft_heartbeat_interval_secs * 1000 - lastMsgAcceptedCostMs_;
}
} // namespace raftex
} // namespace nebula
| 1 | 33,449 | There are two question here: 1. If you only modify lastLogId, but the `iter` is not modified, I really don't know whether we can succeed. 2. We will check the if the term has changed in `replicateLogs` (if rollback really happen, the term must have been updated), so I really doubt whether the problem should be fixed in this way. Or you could describe it in more details. | vesoft-inc-nebula | cpp |
@@ -164,7 +164,7 @@ class OrderFlow extends FormFlow
$request = $this->getRequest()->request;
$requestParameters = $request->all();
$requestParameters['flow_order_step'] = $step->getNumber();
- $requestParameters[$step->getFormType()->getName()] = $stepData;
+ $requestParameters[$step->getFormType()] = $stepData;
$request->replace($requestParameters);
}
} | 1 | <?php
namespace Shopsys\ShopBundle\Form\Front\Order;
use Craue\FormFlowBundle\Form\FormFlow;
use Craue\FormFlowBundle\Form\StepInterface;
class OrderFlow extends FormFlow
{
/**
* @var bool
*/
protected $allowDynamicStepNavigation = true;
/**
* @var int
*/
private $domainId;
/**
* @param int $domainId
*/
public function setDomainId($domainId)
{
$this->domainId = $domainId;
}
/**
* @return string
*/
public function getName()
{
return 'order';
}
/**
* @return array
*/
protected function loadStepsConfig()
{
return [
[
'skip' => true, // the 1st step is the shopping cart
'form_options' => ['js_validation' => false],
],
[
'form_type' => TransportAndPaymentFormType::class,
'form_options' => ['domain_id' => $this->domainId],
],
[
'form_type' => PersonalInfoFormType::class,
'form_options' => ['domain_id' => $this->domainId],
],
];
}
/**
* @return string
*/
protected function determineInstanceId()
{
// Make instance ID constant as we do not want multiple instances of OrderFlow.
return $this->getInstanceId();
}
/**
* @param int $step
* @param array $options
* @return array
*/
public function getFormOptions($step, array $options = [])
{
$options = parent::getFormOptions($step, $options);
// Remove default validation_groups by step.
        // Otherwise FormFactory uses it instead of FormType's callback.
if (isset($options['validation_groups'])) {
unset($options['validation_groups']);
}
return $options;
}
public function saveSentStepData()
{
$stepData = $this->retrieveStepData();
foreach ($this->getSteps() as $step) {
$stepForm = $this->createFormForStep($step->getNumber());
if ($this->getRequest()->request->has($stepForm->getName())) {
$stepData[$step->getNumber()] = $this->getRequest()->request->get($stepForm->getName());
}
}
$this->saveStepData($stepData);
}
/**
* @return bool
*/
public function isBackToCartTransition()
{
return $this->getRequestedStepNumber() === 2
&& $this->getRequestedTransition() === self::TRANSITION_BACK;
}
/**
* @param mixed $formData
*/
public function bind($formData)
{
parent::bind($formData); // load current step number
$firstInvalidStep = $this->getFirstInvalidStep();
if ($firstInvalidStep !== null && $this->getCurrentStepNumber() > $firstInvalidStep->getNumber()) {
$this->changeRequestToStep($firstInvalidStep);
parent::bind($formData); // load changed step
}
}
/**
* @return StepInterface|null
*/
private function getFirstInvalidStep()
{
foreach ($this->getSteps() as $step) {
if (!$this->isStepValid($step)) {
return $step;
}
}
return null;
}
/**
* @param \Craue\FormFlowBundle\Form\StepInterface $step
* @return bool
*/
private function isStepValid(StepInterface $step)
{
$stepNumber = $step->getNumber();
$stepsData = $this->retrieveStepData();
if (array_key_exists($stepNumber, $stepsData)) {
$stepForm = $this->createFormForStep($stepNumber);
$stepForm->submit($stepsData[$stepNumber]); // the form is validated here
return $stepForm->isValid();
}
return $step->getFormType() === null;
}
/**
* @param \Craue\FormFlowBundle\Form\StepInterface $step
*/
private function changeRequestToStep(StepInterface $step)
{
$stepsData = $this->retrieveStepData();
if (array_key_exists($step->getNumber(), $stepsData)) {
$stepData = $stepsData[$step->getNumber()];
} else {
$stepData = [];
}
$request = $this->getRequest()->request;
$requestParameters = $request->all();
$requestParameters['flow_order_step'] = $step->getNumber();
$requestParameters[$step->getFormType()->getName()] = $stepData;
$request->replace($requestParameters);
}
}
| 1 | 11,557 | Hello @jDolba, I have reviewed your PR and I found one problem. `$step->getFormType()` can return `FormTypeInterface`. You cannot use interface as key for an array. Can you find some better way to fix this? Thank you. | shopsys-shopsys | php |
@@ -62,6 +62,19 @@ gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
EOM
yum -y install google-osconfig-agent
echo 'inventory install done'`
+var installInventoryYumEL6 = `sleep 10
+cat > /etc/yum.repos.d/google-osconfig-agent.repo <<EOM
+[google-osconfig-agent]
+name=Google OSConfig Agent Repository
+baseurl=https://packages.cloud.google.com/yum/repos/google-osconfig-agent-el6-unstable
+enabled=1
+gpgcheck=0
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+EOM
+yum -y install google-osconfig-agent
+echo 'inventory install done'`
type inventoryTestSetup struct {
image string | 1 | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package inventory
import (
"bytes"
"compress/gzip"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"log"
"path"
"regexp"
"sync"
"time"
daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"github.com/GoogleCloudPlatform/compute-image-tools/go/packages"
"github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/compute"
"github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/junitxml"
apiBeta "google.golang.org/api/compute/v0.beta"
api "google.golang.org/api/compute/v1"
)
const testSuiteName = "InventoryTests"
// TODO: Should these be configurable via flags?
const testProject = "compute-image-test-pool-001"
const testZone = "us-central1-c"
// TODO: Move to the new combined osconfig package, also make this easily available to other tests.
var installInventoryDeb = `echo 'deb http://packages.cloud.google.com/apt google-osconfig-agent-stretch-unstable main' >> /etc/apt/sources.list
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
apt-get update
apt-get install -y google-osconfig-agent
echo 'inventory install done'`
var installInventoryGooGet = `c:\programdata\googet\googet.exe -noconfirm install -sources https://packages.cloud.google.com/yuck/repos/google-osconfig-agent-unstable google-osconfig-agent
echo 'inventory install done'`
var installInventoryYumEL7 = `cat > /etc/yum.repos.d/google-osconfig-agent.repo <<EOM
[google-osconfig-agent]
name=Google OSConfig Agent Repository
baseurl=https://packages.cloud.google.com/yum/repos/google-osconfig-agent-el7-unstable
enabled=1
gpgcheck=0
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOM
yum -y install google-osconfig-agent
echo 'inventory install done'`
type inventoryTestSetup struct {
image string
name string
packageType []string
shortName string
startup *api.MetadataItems
}
// TestSuite is a InventoryTests test suite.
func TestSuite(ctx context.Context, tswg *sync.WaitGroup, testSuites chan *junitxml.TestSuite, logger *log.Logger, testSuiteRegex, testCaseRegex *regexp.Regexp) {
defer tswg.Done()
	if testSuiteRegex != nil && !testSuiteRegex.MatchString(testSuiteName) {
return
}
testSuite := junitxml.NewTestSuite(testSuiteName)
defer testSuite.Finish(testSuites)
logger.Printf("Running TestSuite %q", testSuite.Name)
testSetup := []*inventoryTestSetup{
// Windows images.
&inventoryTestSetup{
image: "projects/windows-cloud/global/images/family/windows-2008-r2",
packageType: []string{"googet", "wua", "qfe"},
shortName: "windows",
startup: &api.MetadataItems{
Key: "windows-startup-script-cmd",
Value: &installInventoryGooGet,
},
},
&inventoryTestSetup{
image: "projects/windows-cloud/global/images/family/windows-2012-r2",
packageType: []string{"googet", "wua", "qfe"},
shortName: "windows",
startup: &api.MetadataItems{
Key: "windows-startup-script-cmd",
Value: &installInventoryGooGet,
},
},
&inventoryTestSetup{
image: "projects/windows-cloud/global/images/family/windows-2012-r2-core",
packageType: []string{"googet", "wua", "qfe"},
shortName: "windows",
startup: &api.MetadataItems{
Key: "windows-startup-script-cmd",
Value: &installInventoryGooGet,
},
},
&inventoryTestSetup{
image: "projects/windows-cloud/global/images/family/windows-2016",
packageType: []string{"googet", "wua", "qfe"},
shortName: "windows",
startup: &api.MetadataItems{
Key: "windows-startup-script-cmd",
Value: &installInventoryGooGet,
},
},
&inventoryTestSetup{
image: "projects/windows-cloud/global/images/family/windows-2016-core",
packageType: []string{"googet", "wua", "qfe"},
shortName: "windows",
startup: &api.MetadataItems{
Key: "windows-startup-script-cmd",
Value: &installInventoryGooGet,
},
},
&inventoryTestSetup{
image: "projects/windows-cloud/global/images/family/windows-1709-core",
packageType: []string{"googet", "wua", "qfe"},
shortName: "windows",
startup: &api.MetadataItems{
Key: "windows-startup-script-cmd",
Value: &installInventoryGooGet,
},
},
&inventoryTestSetup{
image: "projects/windows-cloud/global/images/family/windows-1803-core",
packageType: []string{"googet", "wua", "qfe"},
shortName: "windows",
startup: &api.MetadataItems{
Key: "windows-startup-script-cmd",
Value: &installInventoryGooGet,
},
},
// Debian images.
&inventoryTestSetup{
image: "projects/debian-cloud/global/images/family/debian-9",
packageType: []string{"deb"},
shortName: "debian",
startup: &api.MetadataItems{
Key: "startup-script",
Value: &installInventoryDeb,
},
},
// Centos images.
&inventoryTestSetup{
image: "projects/centos-cloud/global/images/family/centos-7",
packageType: []string{"rpm"},
shortName: "centos",
startup: &api.MetadataItems{
Key: "startup-script",
Value: &installInventoryYumEL7,
},
},
// RHEL images.
&inventoryTestSetup{
image: "projects/rhel-cloud/global/images/family/rhel-7",
packageType: []string{"rpm"},
shortName: "rhel",
startup: &api.MetadataItems{
Key: "startup-script",
Value: &installInventoryYumEL7,
},
},
// Ubuntu images
&inventoryTestSetup{
image: "projects/ubuntu-os-cloud/global/images/family/ubuntu-1404-lts",
packageType: []string{"deb"},
shortName: "ubuntu",
startup: &api.MetadataItems{
Key: "startup-script",
Value: &installInventoryDeb,
},
},
&inventoryTestSetup{
image: "projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts",
packageType: []string{"deb"},
shortName: "ubuntu",
startup: &api.MetadataItems{
Key: "startup-script",
Value: &installInventoryDeb,
},
},
&inventoryTestSetup{
image: "projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts",
packageType: []string{"deb"},
shortName: "ubuntu",
startup: &api.MetadataItems{
Key: "startup-script",
Value: &installInventoryDeb,
},
},
}
var wg sync.WaitGroup
tests := make(chan *junitxml.TestCase)
for _, setup := range testSetup {
wg.Add(1)
go inventoryTestCase(ctx, setup, tests, &wg, logger, testCaseRegex)
}
go func() {
wg.Wait()
close(tests)
}()
for ret := range tests {
testSuite.TestCase = append(testSuite.TestCase, ret)
}
logger.Printf("Finished TestSuite %q", testSuite.Name)
}
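// runGatherInventoryTest boots an instance from testSetup.image, waits for the
// osconfig agent install to finish, then polls guest attributes until inventory
// data appears or the retry budget is exhausted.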
func runGatherInventoryTest(ctx context.Context, testSetup *inventoryTestSetup, testCase *junitxml.TestCase) (*apiBeta.GuestAttributes, bool) {
testCase.Logf("Creating compute client")
client, err := daisyCompute.NewClient(ctx)
if err != nil {
testCase.WriteFailure("Error creating client: %v", err)
return nil, false
}
testCase.Logf("Creating instance with image %q", testSetup.image)
testSetup.name = fmt.Sprintf("inventory-test-%s-%s", path.Base(testSetup.image), compute.RandString(5))
i := &api.Instance{
Name: testSetup.name,
MachineType: fmt.Sprintf("projects/%s/zones/%s/machineTypes/n1-standard-1", testProject, testZone),
NetworkInterfaces: []*api.NetworkInterface{
&api.NetworkInterface{
Network: "global/networks/default",
AccessConfigs: []*api.AccessConfig{
&api.AccessConfig{
Type: "ONE_TO_ONE_NAT",
},
},
},
},
Metadata: &api.Metadata{
Items: []*api.MetadataItems{
testSetup.startup,
&api.MetadataItems{
Key: "enable-guest-attributes",
Value: func() *string { v := "true"; return &v }(),
},
},
},
Disks: []*api.AttachedDisk{
&api.AttachedDisk{
AutoDelete: true,
Boot: true,
InitializeParams: &api.AttachedDiskInitializeParams{
SourceImage: testSetup.image,
},
},
},
}
inst, err := compute.CreateInstance(client, testProject, testZone, i)
if err != nil {
testCase.WriteFailure("Error creating instance: %v", err)
return nil, false
}
defer inst.Cleanup()
testCase.Logf("Waiting for agent install to complete")
if err := inst.WaitForSerialOutput("inventory install done", 1, 5*time.Second, 5*time.Minute); err != nil {
testCase.WriteFailure("Error waiting for inventory agent install: %v", err)
return nil, false
}
testCase.Logf("Checking inventory data")
// It can take a long time to start collecting data, especially on Windows.
var retryTime = 10 * time.Second
for i := 0; ; i++ {
ga, err := client.GetGuestAttributes(inst.Project, inst.Zone, inst.Name, "guestInventory/", "")
totalRetryTime := time.Duration(i) * retryTime
if err != nil && totalRetryTime > 25*time.Minute {
testCase.WriteFailure("Error getting guest attributes: %v", err)
return nil, false
}
if ga != nil {
return ga, true
}
time.Sleep(retryTime)
continue
}
}
func runHostnameTest(ga *apiBeta.GuestAttributes, testSetup *inventoryTestSetup, testCase *junitxml.TestCase) {
var hostname string
for _, item := range ga.QueryValue.Items {
if item.Key == "Hostname" {
hostname = item.Value
break
}
}
if hostname == "" {
testCase.WriteFailure("Hostname not found in GuestAttributes, QueryPath: %q", ga.QueryPath)
return
}
if hostname != testSetup.name {
testCase.WriteFailure("Hostname does not match expectation: got: %q, want: %q", hostname, testSetup.name)
}
}
func runShortNameTest(ga *apiBeta.GuestAttributes, testSetup *inventoryTestSetup, testCase *junitxml.TestCase) {
var shortName string
for _, item := range ga.QueryValue.Items {
if item.Key == "ShortName" {
shortName = item.Value
break
}
}
if shortName == "" {
testCase.WriteFailure("ShortName not found in GuestAttributes, QueryPath: %q", ga.QueryPath)
return
}
if shortName != testSetup.shortName {
testCase.WriteFailure("ShortName does not match expectation: got: %q, want: %q", shortName, testSetup.shortName)
}
}
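// runPackagesTest decodes the base64- and gzip-encoded InstalledPackages blob and
// checks that at least one package of each expected type was reported.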
func runPackagesTest(ga *apiBeta.GuestAttributes, testSetup *inventoryTestSetup, testCase *junitxml.TestCase) {
var packagesEncoded string
for _, item := range ga.QueryValue.Items {
if item.Key == "InstalledPackages" {
packagesEncoded = item.Value
break
}
}
if packagesEncoded == "" {
testCase.WriteFailure("InstalledPackages not found in GuestAttributes, QueryPath: %q", ga.QueryPath)
return
}
decoded, err := base64.StdEncoding.DecodeString(packagesEncoded)
if err != nil {
testCase.WriteFailure(err.Error())
return
}
zr, err := gzip.NewReader(bytes.NewReader(decoded))
if err != nil {
testCase.WriteFailure(err.Error())
return
}
defer zr.Close()
var buf bytes.Buffer
if _, err := io.Copy(&buf, zr); err != nil {
testCase.WriteFailure(err.Error())
return
}
var pkgs packages.Packages
if err := json.Unmarshal(buf.Bytes(), &pkgs); err != nil {
testCase.WriteFailure(err.Error())
return
}
for _, pt := range testSetup.packageType {
switch pt {
case "googet":
if len(pkgs.GooGet) < 1 {
testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt)
return
}
case "deb":
if len(pkgs.Deb) < 1 {
testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt)
return
}
case "rpm":
if len(pkgs.Rpm) < 1 {
testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt)
return
}
case "pip":
if len(pkgs.Pip) < 1 {
testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt)
return
}
case "gem":
if len(pkgs.Gem) < 1 {
testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt)
return
}
case "wua":
if len(pkgs.WUA) < 1 {
testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt)
return
}
case "qfe":
if len(pkgs.QFE) < 1 {
testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt)
return
}
}
}
}
func inventoryTestCase(ctx context.Context, testSetup *inventoryTestSetup, tests chan *junitxml.TestCase, wg *sync.WaitGroup, logger *log.Logger, regex *regexp.Regexp) {
defer wg.Done()
gatherInventoryTest := junitxml.NewTestCase(testSuiteName, fmt.Sprintf("[%s] Gather Inventory", testSetup.image))
hostnameTest := junitxml.NewTestCase(testSuiteName, fmt.Sprintf("[%s] Check Hostname", testSetup.image))
shortNameTest := junitxml.NewTestCase(testSuiteName, fmt.Sprintf("[%s] Check ShortName", testSetup.image))
packageTest := junitxml.NewTestCase(testSuiteName, fmt.Sprintf("[%s] Check InstalledPackages", testSetup.image))
if gatherInventoryTest.FilterTestCase(regex) {
gatherInventoryTest.Finish(tests)
hostnameTest.WriteSkipped("Setup skipped")
hostnameTest.Finish(tests)
shortNameTest.WriteSkipped("Setup skipped")
		shortNameTest.Finish(tests)
packageTest.WriteSkipped("Setup skipped")
packageTest.Finish(tests)
return
}
logger.Printf("Running TestCase '%s.%q'", gatherInventoryTest.Classname, gatherInventoryTest.Name)
ga, ok := runGatherInventoryTest(ctx, testSetup, gatherInventoryTest)
gatherInventoryTest.Finish(tests)
logger.Printf("TestCase '%s.%q' finished", gatherInventoryTest.Classname, gatherInventoryTest.Name)
if !ok {
hostnameTest.WriteFailure("Setup Failure")
hostnameTest.Finish(tests)
shortNameTest.WriteFailure("Setup Failure")
shortNameTest.Finish(tests)
packageTest.WriteFailure("Setup Failure")
packageTest.Finish(tests)
return
}
for tc, f := range map[*junitxml.TestCase]func(*apiBeta.GuestAttributes, *inventoryTestSetup, *junitxml.TestCase){
hostnameTest: runHostnameTest,
shortNameTest: runShortNameTest,
packageTest: runPackagesTest,
} {
if tc.FilterTestCase(regex) {
tc.Finish(tests)
} else {
logger.Printf("Running TestCase '%s.%q'", tc.Classname, tc.Name)
f(ga, testSetup, tc)
tc.Finish(tests)
logger.Printf("TestCase '%s.%q' finished in %fs", tc.Classname, tc.Name, tc.Time)
}
}
}
| 1 | 8,111 | i was confused by keyword "inventory" here. my understanding is this starts the osconfig-agent which covers inventory lookup and package-management(correct me if i am wrong). | GoogleCloudPlatform-compute-image-tools | go |
@@ -47,6 +47,10 @@ public interface CapabilityType {
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String STRICT_FILE_INTERACTABILITY = "strictFileInteractability";
+ String TIMEOUTS = "timeouts";
+ String IMPLICIT_TIMEOUT = "implicit";
+ String PAGE_LOAD_TIMEOUT = "pageLoad";
+ String SCRIPT_TIMEOUT = "script";
String LOGGING_PREFS = "loggingPrefs";
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
/**
* Commonly seen remote webdriver capabilities.
*/
public interface CapabilityType {
String BROWSER_NAME = "browserName";
@Deprecated String PLATFORM = "platform";
String PLATFORM_NAME = "platformName";
String SUPPORTS_JAVASCRIPT = "javascriptEnabled";
String TAKES_SCREENSHOT = "takesScreenshot";
String VERSION = "version";
String BROWSER_VERSION = "browserVersion";
String SUPPORTS_ALERTS = "handlesAlerts";
String SUPPORTS_SQL_DATABASE = "databaseEnabled";
String SUPPORTS_LOCATION_CONTEXT = "locationContextEnabled";
String SUPPORTS_APPLICATION_CACHE = "applicationCacheEnabled";
String SUPPORTS_NETWORK_CONNECTION = "networkConnectionEnabled";
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
String APPLICATION_NAME = "applicationName";
// Enable this capability to accept all SSL certs by defaults.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String ACCEPT_INSECURE_CERTS = "acceptInsecureCerts";
String HAS_NATIVE_EVENTS = "nativeEvents";
String UNEXPECTED_ALERT_BEHAVIOUR = "unexpectedAlertBehaviour";
String UNHANDLED_PROMPT_BEHAVIOUR = "unhandledPromptBehavior";
String ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior";
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String STRICT_FILE_INTERACTABILITY = "strictFileInteractability";
String LOGGING_PREFS = "loggingPrefs";
String ENABLE_PROFILING_CAPABILITY = "webdriver.logging.profiler.enabled";
String PAGE_LOAD_STRATEGY = "pageLoadStrategy";
interface ForSeleniumServer {
String AVOIDING_PROXY = "avoidProxy";
String ONLY_PROXYING_SELENIUM_TRAFFIC = "onlyProxySeleniumTraffic";
String PROXYING_EVERYTHING = "proxyEverything";
String PROXY_PAC = "proxy_pac";
String ENSURING_CLEAN_SESSION = "ensureCleanSession";
}
}
| 1 | 19,072 | These are really meant to be the keys in the capabilities, not the keys of values within the capabilities | SeleniumHQ-selenium | py |
@@ -1,5 +1,6 @@
import { options, createElement as h, render } from 'preact';
import { useEffect, useState } from 'preact/hooks';
+import sinon from 'sinon';
import { setupScratch, teardown } from '../../../test/_util/helpers';
import { act } from '../../src'; | 1 | import { options, createElement as h, render } from 'preact';
import { useEffect, useState } from 'preact/hooks';
import { setupScratch, teardown } from '../../../test/_util/helpers';
import { act } from '../../src';
/** @jsx h */
describe('act', () => {
/** @type {HTMLDivElement} */
let scratch;
beforeEach(() => {
scratch = setupScratch();
});
afterEach(() => {
teardown(scratch);
});
it('should reset options after act finishes', () => {
expect(options.requestAnimationFrame).to.equal(undefined);
act(() => null);
expect(options.requestAnimationFrame).to.equal(undefined);
});
it('should drain the queue of hooks', () => {
function StateContainer() {
const [count, setCount] = useState(0);
return (<div>
<p>Count: {count}</p>
<button onClick={() => setCount(c => c + 11)} />
</div>);
}
render(<StateContainer />, scratch);
expect(scratch.textContent).to.include('Count: 0');
act(() => {
const button = scratch.querySelector('button');
button.click();
expect(scratch.textContent).to.include('Count: 0');
});
expect(scratch.textContent).to.include('Count: 1');
});
it('should flush pending effects', () => {
let spy = sinon.spy();
function StateContainer() {
useEffect(spy);
return <div />;
}
act(() => render(<StateContainer />, scratch));
expect(spy).to.be.calledOnce;
});
it('should restore options.requestAnimationFrame', () => {
const spy = sinon.spy();
options.requestAnimationFrame = spy;
act(() => null);
expect(options.requestAnimationFrame).to.equal(spy);
expect(spy).to.not.be.called;
});
it('should restore options.debounceRendering after act', () => {
const spy = sinon.spy();
options.debounceRendering = spy;
act(() => null);
expect(options.debounceRendering).to.equal(spy);
expect(spy).to.not.be.called;
});
});
| 1 | 12,728 | This breaks tests on IE because this will import an `esm` bundle. For that reason `sinon` is available as a global in our test suite and never imported. The global is aliased to the proper `es5` file. | preactjs-preact | js |
@@ -12,6 +12,18 @@ import (
"github.com/lucas-clemente/quic-go/utils"
)
+const (
+ // Maximum reordering in time space before time based loss detection considers a packet lost.
+ // In fraction of an RTT.
+ timeReorderingFraction = 1.0 / 8
+ // defaultRTOTimeout is the RTO time on new connections
+ defaultRTOTimeout = 500 * time.Millisecond
+ // Minimum time in the future an RTO alarm may be set for.
+ minRTOTimeout = 200 * time.Millisecond
+ // maxRTOTimeout is the maximum RTO time
+ maxRTOTimeout = 60 * time.Second
+)
+
var (
// ErrDuplicateOrOutOfOrderAck occurs when a duplicate or an out-of-order ACK is received
ErrDuplicateOrOutOfOrderAck = errors.New("SentPacketHandler: Duplicate or out-of-order ACK") | 1 | package ackhandler
import (
"errors"
"fmt"
"time"
"github.com/lucas-clemente/quic-go/congestion"
"github.com/lucas-clemente/quic-go/frames"
"github.com/lucas-clemente/quic-go/protocol"
"github.com/lucas-clemente/quic-go/qerr"
"github.com/lucas-clemente/quic-go/utils"
)
var (
// ErrDuplicateOrOutOfOrderAck occurs when a duplicate or an out-of-order ACK is received
ErrDuplicateOrOutOfOrderAck = errors.New("SentPacketHandler: Duplicate or out-of-order ACK")
// ErrTooManyTrackedSentPackets occurs when the sentPacketHandler has to keep track of too many packets
ErrTooManyTrackedSentPackets = errors.New("Too many outstanding non-acked and non-retransmitted packets")
// ErrAckForSkippedPacket occurs when the client sent an ACK for a packet number that we intentionally skipped
ErrAckForSkippedPacket = qerr.Error(qerr.InvalidAckData, "Received an ACK for a skipped packet number")
	errAckForUnsentPacket = qerr.Error(qerr.InvalidAckData, "Received ACK for an unsent packet")
)
var errPacketNumberNotIncreasing = errors.New("Already sent a packet with a higher packet number.")
type sentPacketHandler struct {
lastSentPacketNumber protocol.PacketNumber
lastSentPacketTime time.Time
skippedPackets []protocol.PacketNumber
LargestAcked protocol.PacketNumber
largestReceivedPacketWithAck protocol.PacketNumber
packetHistory *PacketList
stopWaitingManager stopWaitingManager
retransmissionQueue []*Packet
bytesInFlight protocol.ByteCount
rttStats *congestion.RTTStats
congestion congestion.SendAlgorithm
consecutiveRTOCount uint32
}
// NewSentPacketHandler creates a new sentPacketHandler
func NewSentPacketHandler(rttStats *congestion.RTTStats) SentPacketHandler {
congestion := congestion.NewCubicSender(
congestion.DefaultClock{},
rttStats,
false, /* don't use reno since chromium doesn't (why?) */
protocol.InitialCongestionWindow,
protocol.DefaultMaxCongestionWindow,
)
return &sentPacketHandler{
packetHistory: NewPacketList(),
stopWaitingManager: stopWaitingManager{},
rttStats: rttStats,
congestion: congestion,
}
}
func (h *sentPacketHandler) ackPacket(packetElement *PacketElement) {
packet := &packetElement.Value
h.bytesInFlight -= packet.Length
h.packetHistory.Remove(packetElement)
}
// nackPacket NACKs a packet
// it returns true if a FastRetransmissions was triggered
func (h *sentPacketHandler) nackPacket(packetElement *PacketElement) bool {
packet := &packetElement.Value
packet.MissingReports++
if packet.MissingReports > protocol.RetransmissionThreshold {
utils.Debugf("\tQueueing packet 0x%x for retransmission (fast)", packet.PacketNumber)
h.queuePacketForRetransmission(packetElement)
return true
}
return false
}
// does NOT set packet.Retransmitted. This variable is not needed anymore
func (h *sentPacketHandler) queuePacketForRetransmission(packetElement *PacketElement) {
packet := &packetElement.Value
h.bytesInFlight -= packet.Length
h.retransmissionQueue = append(h.retransmissionQueue, packet)
h.packetHistory.Remove(packetElement)
// strictly speaking, this is only necessary for RTO retransmissions
// this is because FastRetransmissions are triggered by missing ranges in ACKs, and then the LargestAcked will already be higher than the packet number of the retransmitted packet
h.stopWaitingManager.QueuedRetransmissionForPacketNumber(packet.PacketNumber)
}
func (h *sentPacketHandler) largestInOrderAcked() protocol.PacketNumber {
if f := h.packetHistory.Front(); f != nil {
return f.Value.PacketNumber - 1
}
return h.LargestAcked
}
func (h *sentPacketHandler) SentPacket(packet *Packet) error {
if packet.PacketNumber <= h.lastSentPacketNumber {
return errPacketNumberNotIncreasing
}
for p := h.lastSentPacketNumber + 1; p < packet.PacketNumber; p++ {
h.skippedPackets = append(h.skippedPackets, p)
if len(h.skippedPackets) > protocol.MaxTrackedSkippedPackets {
h.skippedPackets = h.skippedPackets[1:]
}
}
now := time.Now()
h.lastSentPacketTime = now
packet.SendTime = now
if packet.Length == 0 {
return errors.New("SentPacketHandler: packet cannot be empty")
}
h.bytesInFlight += packet.Length
h.lastSentPacketNumber = packet.PacketNumber
h.packetHistory.PushBack(*packet)
h.congestion.OnPacketSent(
now,
h.BytesInFlight(),
packet.PacketNumber,
packet.Length,
true, /* TODO: is retransmittable */
)
return nil
}
func (h *sentPacketHandler) ReceivedAck(ackFrame *frames.AckFrame, withPacketNumber protocol.PacketNumber, rcvTime time.Time) error {
if ackFrame.LargestAcked > h.lastSentPacketNumber {
return errAckForUnsentPacket
}
// duplicate or out-of-order ACK
if withPacketNumber <= h.largestReceivedPacketWithAck {
return ErrDuplicateOrOutOfOrderAck
}
h.largestReceivedPacketWithAck = withPacketNumber
// ignore repeated ACK (ACKs that don't have a higher LargestAcked than the last ACK)
if ackFrame.LargestAcked <= h.largestInOrderAcked() {
return nil
}
// check if it acks any packets that were skipped
for _, p := range h.skippedPackets {
if ackFrame.AcksPacket(p) {
return ErrAckForSkippedPacket
}
}
h.LargestAcked = ackFrame.LargestAcked
var ackedPackets congestion.PacketVector
var lostPackets congestion.PacketVector
ackRangeIndex := 0
rttUpdated := false
var el, elNext *PacketElement
for el = h.packetHistory.Front(); el != nil; el = elNext {
		// determine the next list element right at the beginning, because el.Next() is not available anymore when the list element is deleted (i.e. when the packet is ACKed)
elNext = el.Next()
packet := el.Value
packetNumber := packet.PacketNumber
// NACK packets below the LowestAcked
if packetNumber < ackFrame.LowestAcked {
retransmitted := h.nackPacket(el)
if retransmitted {
lostPackets = append(lostPackets, congestion.PacketInfo{Number: packetNumber, Length: packet.Length})
}
continue
}
// Update the RTT
if packetNumber == h.LargestAcked {
rttUpdated = true
timeDelta := rcvTime.Sub(packet.SendTime)
h.rttStats.UpdateRTT(timeDelta, ackFrame.DelayTime, rcvTime)
if utils.Debug() {
utils.Debugf("\tEstimated RTT: %dms", h.rttStats.SmoothedRTT()/time.Millisecond)
}
}
if packetNumber > ackFrame.LargestAcked {
break
}
if ackFrame.HasMissingRanges() {
ackRange := ackFrame.AckRanges[len(ackFrame.AckRanges)-1-ackRangeIndex]
for packetNumber > ackRange.LastPacketNumber && ackRangeIndex < len(ackFrame.AckRanges)-1 {
ackRangeIndex++
ackRange = ackFrame.AckRanges[len(ackFrame.AckRanges)-1-ackRangeIndex]
}
if packetNumber >= ackRange.FirstPacketNumber { // packet i contained in ACK range
if packetNumber > ackRange.LastPacketNumber {
return fmt.Errorf("BUG: ackhandler would have acked wrong packet 0x%x, while evaluating range 0x%x -> 0x%x", packetNumber, ackRange.FirstPacketNumber, ackRange.LastPacketNumber)
}
h.ackPacket(el)
ackedPackets = append(ackedPackets, congestion.PacketInfo{Number: packetNumber, Length: packet.Length})
} else {
retransmitted := h.nackPacket(el)
if retransmitted {
lostPackets = append(lostPackets, congestion.PacketInfo{Number: packetNumber, Length: packet.Length})
}
}
} else {
h.ackPacket(el)
ackedPackets = append(ackedPackets, congestion.PacketInfo{Number: packetNumber, Length: packet.Length})
}
}
if rttUpdated {
// Reset counter if a new packet was acked
h.consecutiveRTOCount = 0
}
h.garbageCollectSkippedPackets()
h.stopWaitingManager.ReceivedAck(ackFrame)
h.congestion.OnCongestionEvent(
rttUpdated,
h.BytesInFlight(),
ackedPackets,
lostPackets,
)
return nil
}
func (h *sentPacketHandler) DequeuePacketForRetransmission() *Packet {
if len(h.retransmissionQueue) == 0 {
return nil
}
	queueLen := len(h.retransmissionQueue)
	// packets are usually NACKed in descending order. So use the slice as a stack
	packet := h.retransmissionQueue[queueLen-1]
	h.retransmissionQueue = h.retransmissionQueue[:queueLen-1]
	return packet
}
func (h *sentPacketHandler) BytesInFlight() protocol.ByteCount {
return h.bytesInFlight
}
func (h *sentPacketHandler) GetLeastUnacked() protocol.PacketNumber {
return h.largestInOrderAcked() + 1
}
func (h *sentPacketHandler) GetStopWaitingFrame(force bool) *frames.StopWaitingFrame {
return h.stopWaitingManager.GetStopWaitingFrame(force)
}
func (h *sentPacketHandler) SendingAllowed() bool {
congestionLimited := h.BytesInFlight() > h.congestion.GetCongestionWindow()
maxTrackedLimited := protocol.PacketNumber(len(h.retransmissionQueue)+h.packetHistory.Len()) >= protocol.MaxTrackedSentPackets
return !(congestionLimited || maxTrackedLimited)
}
func (h *sentPacketHandler) CheckForError() error {
length := len(h.retransmissionQueue) + h.packetHistory.Len()
if protocol.PacketNumber(length) > protocol.MaxTrackedSentPackets {
return ErrTooManyTrackedSentPackets
}
return nil
}
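// MaybeQueueRTOs queues the two oldest unacked packets for retransmission once the
// RTO alarm has fired, restarts the RTO timer and bumps the backoff counter.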
func (h *sentPacketHandler) MaybeQueueRTOs() {
if time.Now().Before(h.TimeOfFirstRTO()) {
return
}
// Always queue the two oldest packets
if h.packetHistory.Front() != nil {
h.queueRTO(h.packetHistory.Front())
}
if h.packetHistory.Front() != nil {
h.queueRTO(h.packetHistory.Front())
}
// Reset the RTO timer here, since it's not clear that this packet contained any retransmittable frames
h.lastSentPacketTime = time.Now()
h.consecutiveRTOCount++
}
func (h *sentPacketHandler) queueRTO(el *PacketElement) {
packet := &el.Value
packetsLost := congestion.PacketVector{congestion.PacketInfo{
Number: packet.PacketNumber,
Length: packet.Length,
}}
h.congestion.OnCongestionEvent(false, h.BytesInFlight(), nil, packetsLost)
h.congestion.OnRetransmissionTimeout(true)
utils.Debugf("\tQueueing packet 0x%x for retransmission (RTO)", packet.PacketNumber)
h.queuePacketForRetransmission(el)
}
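// getRTO returns the current retransmission timeout, applying exponential backoff
// based on the number of consecutive RTOs.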
func (h *sentPacketHandler) getRTO() time.Duration {
rto := h.congestion.RetransmissionDelay()
if rto == 0 {
rto = protocol.DefaultRetransmissionTime
}
rto = utils.MaxDuration(rto, protocol.MinRetransmissionTime)
// Exponential backoff
rto *= 1 << h.consecutiveRTOCount
return utils.MinDuration(rto, protocol.MaxRetransmissionTime)
}
func (h *sentPacketHandler) TimeOfFirstRTO() time.Time {
if h.lastSentPacketTime.IsZero() {
return time.Time{}
}
return h.lastSentPacketTime.Add(h.getRTO())
}
func (h *sentPacketHandler) garbageCollectSkippedPackets() {
lioa := h.largestInOrderAcked()
deleteIndex := 0
for i, p := range h.skippedPackets {
if p <= lioa {
deleteIndex = i + 1
}
}
h.skippedPackets = h.skippedPackets[deleteIndex:]
}
| 1 | 5,800 | Maybe move all the Loss Recovery constants to a separate file. | lucas-clemente-quic-go | go |
@@ -145,6 +145,7 @@ func (d *Disk) validate(ctx context.Context, s *Step) DError {
}
type diskAttachment struct {
+ diskName string
mode string
attacher, detacher *Step
} | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"context"
"encoding/json"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)
var (
diskURLRgx = regexp.MustCompile(fmt.Sprintf(`^(projects/(?P<project>%[1]s)/)?zones/(?P<zone>%[2]s)/disks/(?P<disk>%[2]s)(/resize)?$`, projectRgxStr, rfc1035))
deviceNameURLRgx = regexp.MustCompile(fmt.Sprintf(`^(projects/(?P<project>%[1]s)/)?zones/(?P<zone>%[2]s)/devices/(?P<disk>%[2]s)$`, projectRgxStr, rfc1035))
)
// diskExists should only be used during validation for existing GCE disks
// and should not be relied or populated for daisy created resources.
func (w *Workflow) diskExists(project, zone, disk string) (bool, DError) {
return w.diskCache.resourceExists(func(project, zone string, opts ...daisyCompute.ListCallOption) (interface{}, error) {
return w.ComputeClient.ListDisks(project, zone)
}, project, zone, disk)
}
// isDiskAttached should only be used during validation for existing attached GCE disks
// and should not be relied or populated for daisy created resources.
func isDiskAttached(client daisyCompute.Client, deviceName, project, zone, instance string) (bool, DError) {
i, err := client.GetInstance(project, zone, instance)
if err != nil {
return false, Errf("failed to get instance info for checking attached disks: %v", err)
}
parts := strings.Split(deviceName, "/")
realName := parts[len(parts)-1]
for _, d := range i.Disks {
if d.DeviceName == realName {
return true, nil
}
}
return false, nil
}
// Disk is used to create a GCE disk in a project.
type Disk struct {
compute.Disk
Resource
// If this is enabled, then WINDOWS will be added to the
// disk's guestOsFeatures. This is a string since daisy
// replaces variables after JSON has been parsed.
// (If it were boolean, the JSON marshaller throws
// an error when it sees something like `${is_windows}`)
IsWindows string `json:"isWindows,omitempty"`
// Size of this disk.
SizeGb string `json:"sizeGb,omitempty"`
// Fallback to pd-standard when quota is not enough for higher-level pd
FallbackToPdStandard bool `json:"fallbackToPdStandard,omitempty"`
}
// MarshalJSON is a hacky workaround to prevent Disk from using compute.Disk's implementation.
func (d *Disk) MarshalJSON() ([]byte, error) {
return json.Marshal(*d)
}
func (d *Disk) populate(ctx context.Context, s *Step) DError {
var errs DError
d.Name, d.Zone, errs = d.Resource.populateWithZone(ctx, s, d.Name, d.Zone)
d.Description = strOr(d.Description, fmt.Sprintf("Disk created by Daisy in workflow %q on behalf of %s.", s.w.Name, s.w.username))
if d.SizeGb != "" {
size, err := strconv.ParseInt(d.SizeGb, 10, 64)
if err != nil {
errs = addErrs(errs, Errf("cannot parse SizeGb: %s, err: %v", d.SizeGb, err))
}
d.Disk.SizeGb = size
}
if d.IsWindows != "" {
isWindows, err := strconv.ParseBool(d.IsWindows)
if err != nil {
errs = addErrs(errs, Errf("cannot parse IsWindows as boolean: %s, err: %v", d.IsWindows, err))
}
if isWindows {
d.GuestOsFeatures = CombineGuestOSFeatures(d.GuestOsFeatures, "WINDOWS")
}
}
if imageURLRgx.MatchString(d.SourceImage) {
d.SourceImage = extendPartialURL(d.SourceImage, d.Project)
}
if d.Type == "" {
d.Type = fmt.Sprintf("projects/%s/zones/%s/diskTypes/pd-standard", d.Project, d.Zone)
} else if diskTypeURLRgx.MatchString(d.Type) {
d.Type = extendPartialURL(d.Type, d.Project)
} else {
d.Type = fmt.Sprintf("projects/%s/zones/%s/diskTypes/%s", d.Project, d.Zone, d.Type)
}
d.link = fmt.Sprintf("projects/%s/zones/%s/disks/%s", d.Project, d.Zone, d.Name)
return errs
}
func (d *Disk) validate(ctx context.Context, s *Step) DError {
pre := fmt.Sprintf("cannot create disk %q", d.daisyName)
errs := d.Resource.validateWithZone(ctx, s, d.Zone, pre)
if !diskTypeURLRgx.MatchString(d.Type) {
errs = addErrs(errs, Errf("%s: bad disk type: %q", pre, d.Type))
}
if d.SourceImage != "" {
if _, err := s.w.images.regUse(d.SourceImage, s); err != nil {
errs = addErrs(errs, Errf("%s: can't use image %q: %v", pre, d.SourceImage, err))
}
} else if d.SourceSnapshot != "" {
if _, err := s.w.snapshots.regUse(d.SourceSnapshot, s); err != nil {
errs = addErrs(errs, Errf("%s: can't use snapshot %q: %v", pre, d.SourceSnapshot, err))
}
} else if d.Disk.SizeGb == 0 {
errs = addErrs(errs, Errf("%s: SizeGb, SourceSnapshot or SourceImage not set", pre))
}
// Register creation.
errs = addErrs(errs, s.w.disks.regCreate(d.daisyName, &d.Resource, s, false))
return errs
}
type diskAttachment struct {
mode string
attacher, detacher *Step
}
type diskRegistry struct {
baseResourceRegistry
attachments map[string]map[string]*diskAttachment // map (disk, instance) -> attachment
testDetachHelper func(dName, iName string, s *Step) DError
}
func newDiskRegistry(w *Workflow) *diskRegistry {
dr := &diskRegistry{baseResourceRegistry: baseResourceRegistry{w: w, typeName: "disk", urlRgx: diskURLRgx}}
dr.baseResourceRegistry.deleteFn = dr.deleteFn
dr.init()
return dr
}
func (dr *diskRegistry) init() {
dr.baseResourceRegistry.init()
dr.attachments = map[string]map[string]*diskAttachment{}
}
func (dr *diskRegistry) deleteFn(res *Resource) DError {
m := NamedSubexp(diskURLRgx, res.link)
err := dr.w.ComputeClient.DeleteDisk(m["project"], m["zone"], m["disk"])
if gErr, ok := err.(*googleapi.Error); ok && gErr.Code == http.StatusNotFound {
return typedErr(resourceDNEError, "failed to delete disk", err)
}
return newErr("failed to delete disk", err)
}
// detachHelper marks s as the detacher between dName and iName.
// Returns an error if the detacher doesn't depend on the attacher.
func (dr *diskRegistry) detachHelper(dName, iName string, isAttached bool, s *Step) DError {
if dr.testDetachHelper != nil {
return dr.testDetachHelper(dName, iName, s)
}
// if the disk has already been attached before workflow is executed, skip validating its attacher
if isAttached {
return nil
}
pre := fmt.Sprintf("step %q cannot detach disk %q from instance %q", s.name, dName, iName)
var att *diskAttachment
if im, _ := dr.attachments[dName]; im == nil {
return Errf("%s: not attached", pre)
} else if att, _ = im[iName]; att == nil {
return Errf("%s: not attached", pre)
} else if att.detacher != nil {
return Errf("%s: already detached or concurrently detached by step %q", pre, att.detacher.name)
} else if !s.nestedDepends(att.attacher) {
return Errf("%s: step %q does not depend on attaching step %q", pre, s.name, att.attacher.name)
}
att.detacher = s
return nil
}
// registerAttachment is called by Instance.regCreate and AttachDisks.validate and marks a disk as attached to an instance by Step s.
func (dr *diskRegistry) regAttach(dName, iName, mode string, s *Step) DError {
dr.mx.Lock()
defer dr.mx.Unlock()
pre := fmt.Sprintf("step %q cannot attach disk %q to instance %q", s.name, dName, iName)
// Iterate over disk's attachments. Check for concurrent conflicts.
// Step s is concurrent with other attachments if the attachment detacher == nil
// or s does not depend on the detacher.
// If this is a repeat attachment (same disk and instance already attached), do nothing and return.
for attIName, att := range dr.attachments[dName] {
// Is this a concurrent attachment?
if att.detacher == nil || !s.nestedDepends(att.detacher) {
if attIName == iName {
return nil // this is a repeat attachment to the same instance -- does nothing
} else if strIn(diskModeRW, []string{mode, att.mode}) {
// Can't have concurrent attachment in RW mode.
return Errf(
"%s: concurrent RW attachment of disk %q between instances %q (%s) and %q (%s)",
pre, dName, iName, mode, attIName, att.mode)
}
}
}
var im map[string]*diskAttachment
if im, _ = dr.attachments[dName]; im == nil {
im = map[string]*diskAttachment{}
dr.attachments[dName] = im
}
im[iName] = &diskAttachment{mode: mode, attacher: s}
return nil
}
// regDetach marks s as the detacher for the dName disk and iName instance.
// Returns an error if dName or iName don't exist or if detachHelper returns an error.
func (dr *diskRegistry) regDetach(dName, iName string, isAttached bool, s *Step) DError {
dr.mx.Lock()
defer dr.mx.Unlock()
return dr.detachHelper(dName, iName, isAttached, s)
}
// regDetachAll is called by Instance.regDelete and registers Step s as the detacher for all disks currently attached to iName.
func (dr *diskRegistry) regDetachAll(iName string, s *Step) DError {
dr.mx.Lock()
defer dr.mx.Unlock()
var errs DError
// For every disk.
for dName, im := range dr.attachments {
// Check if instance attached.
if att, _ := im[iName]; att == nil || att.detacher != nil {
continue
}
// If yes, detach.
		errs = addErrs(errs, dr.detachHelper(dName, iName, false, s))
}
return errs
}
| 1 | 11,518 | this field is added so that we can find disk name by device name from attachments | GoogleCloudPlatform-compute-image-tools | go |
@@ -1,5 +1,6 @@
package com.fsck.k9.activity;
+
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent; | 1 | package com.fsck.k9.activity;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.text.format.DateUtils;
import com.fsck.k9.Account;
import com.fsck.k9.AccountStats;
import com.fsck.k9.K9;
import com.fsck.k9.R;
import com.fsck.k9.controller.SimpleMessagingListener;
import com.fsck.k9.service.MailService;
public class ActivityListener extends SimpleMessagingListener {
private Account mAccount = null;
private String mLoadingFolderName = null;
private String mLoadingHeaderFolderName = null;
private String mLoadingAccountDescription = null;
private String mSendingAccountDescription = null;
private int mFolderCompleted = 0;
private int mFolderTotal = 0;
private String mProcessingAccountDescription = null;
private String mProcessingCommandTitle = null;
private BroadcastReceiver mTickReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
informUserOfStatus();
}
};
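    // Builds the status string shown to the user: the operation currently in
    // progress if there is one, otherwise the next poll time or the reason
    // syncing is disabled.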
public String getOperation(Context context) {
if (mLoadingAccountDescription != null
|| mSendingAccountDescription != null
|| mLoadingHeaderFolderName != null
|| mProcessingAccountDescription != null) {
return getActionInProgressOperation(context);
} else {
long nextPollTime = MailService.getNextPollTime();
if (nextPollTime != -1) {
return context.getString(R.string.status_next_poll,
DateUtils.getRelativeTimeSpanString(nextPollTime, System.currentTimeMillis(),
DateUtils.MINUTE_IN_MILLIS, 0));
} else if (K9.isDebug() && MailService.isSyncDisabled()) {
if (MailService.hasNoConnectivity()) {
return context.getString(R.string.status_no_network);
} else if (MailService.isSyncNoBackground()) {
return context.getString(R.string.status_no_background);
} else if (MailService.isSyncBlocked()) {
return context.getString(R.string.status_syncing_blocked);
} else if (MailService.isPollAndPushDisabled()) {
return context.getString(R.string.status_poll_and_push_disabled);
} else {
return context.getString(R.string.status_syncing_off);
}
} else if (MailService.isSyncDisabled()) {
return context.getString(R.string.status_syncing_off);
} else {
return "";
}
}
}
private String getActionInProgressOperation(Context context) {
String progress = (mFolderTotal > 0 ?
context.getString(R.string.folder_progress, mFolderCompleted, mFolderTotal) : "");
if (mLoadingFolderName != null || mLoadingHeaderFolderName != null) {
String displayName = null;
if (mLoadingHeaderFolderName != null) {
displayName = mLoadingHeaderFolderName;
} else if (mLoadingFolderName != null) {
displayName = mLoadingFolderName;
}
if ((mAccount != null) && (mAccount.getInboxFolderName() != null)
&& mAccount.getInboxFolderName().equalsIgnoreCase(displayName)) {
displayName = context.getString(R.string.special_mailbox_name_inbox);
} else if ((mAccount != null) && (mAccount.getOutboxFolderName() != null)
&& mAccount.getOutboxFolderName().equals(displayName)) {
displayName = context.getString(R.string.special_mailbox_name_outbox);
}
if (mLoadingHeaderFolderName != null) {
return context.getString(R.string.status_loading_account_folder_headers,
mLoadingAccountDescription, displayName, progress);
} else {
return context.getString(R.string.status_loading_account_folder,
mLoadingAccountDescription, displayName, progress);
}
}
else if (mSendingAccountDescription != null) {
return context.getString(R.string.status_sending_account, mSendingAccountDescription, progress);
} else if (mProcessingAccountDescription != null) {
return context.getString(R.string.status_processing_account, mProcessingAccountDescription,
mProcessingCommandTitle != null ? mProcessingCommandTitle : "",
progress);
} else {
return "";
}
}
public void onResume(Context context) {
context.registerReceiver(mTickReceiver, new IntentFilter(Intent.ACTION_TIME_TICK));
}
public void onPause(Context context) {
context.unregisterReceiver(mTickReceiver);
}
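    // No-op hook invoked on every listener event; meant to be overridden by
    // subclasses to refresh their status display.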
public void informUserOfStatus() {
}
@Override
public void synchronizeMailboxFinished(
Account account,
String folder,
int totalMessagesInMailbox,
int numNewMessages) {
mLoadingAccountDescription = null;
mLoadingFolderName = null;
mAccount = null;
informUserOfStatus();
}
@Override
public void synchronizeMailboxStarted(Account account, String folder) {
mLoadingAccountDescription = account.getDescription();
mLoadingFolderName = folder;
mAccount = account;
mFolderCompleted = 0;
mFolderTotal = 0;
informUserOfStatus();
}
@Override
public void synchronizeMailboxHeadersStarted(Account account, String folder) {
mLoadingAccountDescription = account.getDescription();
mLoadingHeaderFolderName = folder;
informUserOfStatus();
}
@Override
public void synchronizeMailboxHeadersProgress(Account account, String folder, int completed, int total) {
mFolderCompleted = completed;
mFolderTotal = total;
informUserOfStatus();
}
@Override
public void synchronizeMailboxHeadersFinished(Account account, String folder,
int total, int completed) {
mLoadingHeaderFolderName = null;
mFolderCompleted = 0;
mFolderTotal = 0;
informUserOfStatus();
}
@Override
public void synchronizeMailboxProgress(Account account, String folder, int completed, int total) {
mFolderCompleted = completed;
mFolderTotal = total;
informUserOfStatus();
}
@Override
public void synchronizeMailboxFailed(Account account, String folder,
String message) {
mLoadingAccountDescription = null;
mLoadingHeaderFolderName = null;
mLoadingFolderName = null;
mAccount = null;
informUserOfStatus();
}
@Override
public void sendPendingMessagesStarted(Account account) {
mSendingAccountDescription = account.getDescription();
informUserOfStatus();
}
@Override
public void sendPendingMessagesCompleted(Account account) {
mSendingAccountDescription = null;
informUserOfStatus();
}
@Override
public void sendPendingMessagesFailed(Account account) {
mSendingAccountDescription = null;
informUserOfStatus();
}
@Override
public void pendingCommandsProcessing(Account account) {
mProcessingAccountDescription = account.getDescription();
mFolderCompleted = 0;
mFolderTotal = 0;
informUserOfStatus();
}
@Override
public void pendingCommandsFinished(Account account) {
mProcessingAccountDescription = null;
informUserOfStatus();
}
@Override
public void pendingCommandStarted(Account account, String commandTitle) {
mProcessingCommandTitle = commandTitle;
informUserOfStatus();
}
@Override
public void pendingCommandCompleted(Account account, String commandTitle) {
mProcessingCommandTitle = null;
informUserOfStatus();
}
@Override
public void searchStats(AccountStats stats) {
informUserOfStatus();
}
@Override
public void systemStatusChanged() {
informUserOfStatus();
}
@Override
public void folderStatusChanged(Account account, String folder, int unreadMessageCount) {
informUserOfStatus();
}
public int getFolderCompleted() {
return mFolderCompleted;
}
public int getFolderTotal() {
return mFolderTotal;
}
}
| 1 | 16,171 | Unnecessary new line | k9mail-k-9 | java |
@@ -1893,13 +1893,15 @@ func (c *linuxContainer) currentState() (*State, error) {
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
for _, nsType := range configs.NamespaceTypes() {
- if !configs.IsNamespaceSupported(nsType) {
+ if _, ok := state.NamespacePaths[nsType]; ok {
continue
}
- if _, ok := state.NamespacePaths[nsType]; !ok {
- ns := configs.Namespace{Type: nsType}
- state.NamespacePaths[ns.Type] = ns.GetPath(pid)
+ if !configs.IsNamespaceSupported(nsType) {
+ continue
}
+ ns := configs.Namespace{Type: nsType}
+ state.NamespacePaths[ns.Type] = ns.GetPath(pid)
+
}
}
return state, nil | 1 | // +build linux
package libcontainer
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/checkpoint-restore/go-criu/v4"
criurpc "github.com/checkpoint-restore/go-criu/v4/rpc"
"github.com/golang/protobuf/proto"
errorsf "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
)
const stdioFdCount = 3
type linuxContainer struct {
id string
root string
config *configs.Config
cgroupManager cgroups.Manager
intelRdtManager intelrdt.Manager
initPath string
initArgs []string
initProcess parentProcess
initProcessStartTime uint64
criuPath string
newuidmapPath string
newgidmapPath string
m sync.Mutex
criuVersion int
state containerState
created time.Time
}
// State represents a running container's state
type State struct {
BaseState
// Platform specific fields below here
// Specified if the container was started under the rootless mode.
// Set to true if BaseState.Config.RootlessEUID && BaseState.Config.RootlessCgroups
Rootless bool `json:"rootless"`
// Paths to all the container's cgroups, as returned by (*cgroups.Manager).GetPaths
//
// For cgroup v1, a key is cgroup subsystem name, and the value is the path
// to the cgroup for this subsystem.
//
// For cgroup v2 unified hierarchy, a key is "", and the value is the unified path.
CgroupPaths map[string]string `json:"cgroup_paths"`
// NamespacePaths are filepaths to the container's namespaces. Key is the namespace type
// with the value as the path.
NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"`
// Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore
ExternalDescriptors []string `json:"external_descriptors,omitempty"`
// Intel RDT "resource control" filesystem path
IntelRdtPath string `json:"intel_rdt_path"`
}
// Container is a libcontainer container object.
//
// Each container is thread-safe within the same process. Since a container can
// be destroyed by a separate process, any function may return that the container
// was not found.
type Container interface {
BaseContainer
// Methods below here are platform specific
// Checkpoint checkpoints the running container's state to disk using the criu(8) utility.
//
// errors:
// Systemerror - System error.
Checkpoint(criuOpts *CriuOpts) error
// Restore restores the checkpointed container to a running state using the criu(8) utility.
//
// errors:
// Systemerror - System error.
Restore(process *Process, criuOpts *CriuOpts) error
// If the Container state is RUNNING or CREATED, sets the Container state to PAUSING and pauses
// the execution of any user processes. Asynchronously, when the container finished being paused the
// state is changed to PAUSED.
// If the Container state is PAUSED, do nothing.
//
// errors:
// ContainerNotExists - Container no longer exists,
// ContainerNotRunning - Container not running or created,
// Systemerror - System error.
Pause() error
// If the Container state is PAUSED, resumes the execution of any user processes in the
// Container before setting the Container state to RUNNING.
// If the Container state is RUNNING, do nothing.
//
// errors:
// ContainerNotExists - Container no longer exists,
// ContainerNotPaused - Container is not paused,
// Systemerror - System error.
Resume() error
// NotifyOOM returns a read-only channel signaling when the container receives an OOM notification.
//
// errors:
// Systemerror - System error.
NotifyOOM() (<-chan struct{}, error)
// NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level
//
// errors:
// Systemerror - System error.
NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error)
}
// ID returns the container's unique ID
func (c *linuxContainer) ID() string {
return c.id
}
// Config returns the container's configuration
func (c *linuxContainer) Config() configs.Config {
return *c.config
}
func (c *linuxContainer) Status() (Status, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentStatus()
}
func (c *linuxContainer) State() (*State, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentState()
}
func (c *linuxContainer) OCIState() (*specs.State, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentOCIState()
}
func (c *linuxContainer) Processes() ([]int, error) {
var pids []int
status, err := c.currentStatus()
if err != nil {
return pids, err
}
	// for systemd cgroup, the unit's cgroup path will be auto-removed once all the container's processes have exited
if status == Stopped && !c.cgroupManager.Exists() {
return pids, nil
}
pids, err = c.cgroupManager.GetAllPids()
if err != nil {
return nil, newSystemErrorWithCause(err, "getting all container pids from cgroups")
}
return pids, nil
}
func (c *linuxContainer) Stats() (*Stats, error) {
var (
err error
stats = &Stats{}
)
if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil {
return stats, newSystemErrorWithCause(err, "getting container stats from cgroups")
}
if c.intelRdtManager != nil {
if stats.IntelRdtStats, err = c.intelRdtManager.GetStats(); err != nil {
return stats, newSystemErrorWithCause(err, "getting container's Intel RDT stats")
}
}
for _, iface := range c.config.Networks {
switch iface.Type {
case "veth":
istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
if err != nil {
return stats, newSystemErrorWithCausef(err, "getting network stats for interface %q", iface.HostInterfaceName)
}
stats.Interfaces = append(stats.Interfaces, istats)
}
}
return stats, nil
}
func (c *linuxContainer) Set(config configs.Config) error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status == Stopped {
return newGenericError(errors.New("container not running"), ContainerNotRunning)
}
if err := c.cgroupManager.Set(&config); err != nil {
// Set configs back
if err2 := c.cgroupManager.Set(c.config); err2 != nil {
logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
return err
}
if c.intelRdtManager != nil {
if err := c.intelRdtManager.Set(&config); err != nil {
// Set configs back
if err2 := c.cgroupManager.Set(c.config); err2 != nil {
logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
if err2 := c.intelRdtManager.Set(c.config); err2 != nil {
logrus.Warnf("Setting back intelrdt configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
return err
}
}
// After config setting succeed, update config and states
c.config = &config
_, err = c.updateState(nil)
return err
}
func (c *linuxContainer) Start(process *Process) error {
c.m.Lock()
defer c.m.Unlock()
if c.config.Cgroups.Resources.SkipDevices {
return newGenericError(errors.New("can't start container with SkipDevices set"), ConfigInvalid)
}
if process.Init {
if err := c.createExecFifo(); err != nil {
return err
}
}
if err := c.start(process); err != nil {
if process.Init {
c.deleteExecFifo()
}
return err
}
return nil
}
func (c *linuxContainer) Run(process *Process) error {
if err := c.Start(process); err != nil {
return err
}
if process.Init {
return c.exec()
}
return nil
}
func (c *linuxContainer) Exec() error {
c.m.Lock()
defer c.m.Unlock()
return c.exec()
}
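// exec unblocks the container's init process by opening the exec fifo for
// reading. It also polls the init process state so that it does not block
// forever if init dies before the fifo handshake completes.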
func (c *linuxContainer) exec() error {
path := filepath.Join(c.root, execFifoFilename)
pid := c.initProcess.pid()
blockingFifoOpenCh := awaitFifoOpen(path)
for {
select {
case result := <-blockingFifoOpenCh:
return handleFifoResult(result)
case <-time.After(time.Millisecond * 100):
stat, err := system.Stat(pid)
if err != nil || stat.State == system.Zombie {
				// This could be because the process started, ran, and completed between our 100ms timeout and our system.Stat() check.
				// See if the fifo exists and has data (with a non-blocking open, which will succeed if the writing process is complete).
if err := handleFifoResult(fifoOpen(path, false)); err != nil {
return errors.New("container process is already dead")
}
return nil
}
}
}
}
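// readFromExecFifo drains the exec fifo; an empty read means the fifo was
// already consumed, i.e. the container was already started.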
func readFromExecFifo(execFifo io.Reader) error {
data, err := ioutil.ReadAll(execFifo)
if err != nil {
return err
}
if len(data) <= 0 {
return errors.New("cannot start an already running container")
}
return nil
}
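// awaitFifoOpen performs a blocking fifo open in a goroutine and delivers
// the result on the returned channel, so callers can select on it with a
// timeout.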
func awaitFifoOpen(path string) <-chan openResult {
fifoOpened := make(chan openResult)
go func() {
result := fifoOpen(path, true)
fifoOpened <- result
}()
return fifoOpened
}
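// fifoOpen opens the fifo read-only, either blocking until a writer appears
// or, when block is false, returning immediately (O_NONBLOCK).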
func fifoOpen(path string, block bool) openResult {
flags := os.O_RDONLY
if !block {
flags |= unix.O_NONBLOCK
}
f, err := os.OpenFile(path, flags, 0)
if err != nil {
return openResult{err: newSystemErrorWithCause(err, "open exec fifo for reading")}
}
return openResult{file: f}
}
func handleFifoResult(result openResult) error {
if result.err != nil {
return result.err
}
f := result.file
defer f.Close()
if err := readFromExecFifo(f); err != nil {
return err
}
return os.Remove(f.Name())
}
type openResult struct {
file *os.File
err error
}
func (c *linuxContainer) start(process *Process) error {
parent, err := c.newParentProcess(process)
if err != nil {
return newSystemErrorWithCause(err, "creating new parent process")
}
parent.forwardChildLogs()
if err := parent.start(); err != nil {
return newSystemErrorWithCause(err, "starting container process")
}
if process.Init {
if c.config.Hooks != nil {
s, err := c.currentOCIState()
if err != nil {
return err
}
if err := c.config.Hooks[configs.Poststart].RunHooks(s); err != nil {
if err := ignoreTerminateErrors(parent.terminate()); err != nil {
logrus.Warn(errorsf.Wrapf(err, "Running Poststart hook"))
}
return err
}
}
}
return nil
}
func (c *linuxContainer) Signal(s os.Signal, all bool) error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if all {
		// for systemd cgroup, the unit's cgroup path will be auto-removed once all the container's processes have exited
if status == Stopped && !c.cgroupManager.Exists() {
return nil
}
return signalAllProcesses(c.cgroupManager, s)
}
// to avoid a PID reuse attack
if status == Running || status == Created || status == Paused {
if err := c.initProcess.signal(s); err != nil {
return newSystemErrorWithCause(err, "signaling init process")
}
return nil
}
return newGenericError(errors.New("container not running"), ContainerNotRunning)
}
func (c *linuxContainer) createExecFifo() error {
rootuid, err := c.Config().HostRootUID()
if err != nil {
return err
}
rootgid, err := c.Config().HostRootGID()
if err != nil {
return err
}
fifoName := filepath.Join(c.root, execFifoFilename)
if _, err := os.Stat(fifoName); err == nil {
return fmt.Errorf("exec fifo %s already exists", fifoName)
}
oldMask := unix.Umask(0000)
if err := unix.Mkfifo(fifoName, 0622); err != nil {
unix.Umask(oldMask)
return err
}
unix.Umask(oldMask)
return os.Chown(fifoName, rootuid, rootgid)
}
func (c *linuxContainer) deleteExecFifo() {
fifoName := filepath.Join(c.root, execFifoFilename)
os.Remove(fifoName)
}
// includeExecFifo opens the container's execfifo as a pathfd, so that the
// container cannot access the statedir (and the FIFO itself remains
// un-opened). It then adds the FifoFd to the given exec.Cmd as an inherited
// fd, with _LIBCONTAINER_FIFOFD set to its fd number.
func (c *linuxContainer) includeExecFifo(cmd *exec.Cmd) error {
fifoName := filepath.Join(c.root, execFifoFilename)
fifoFd, err := unix.Open(fifoName, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return err
}
cmd.ExtraFiles = append(cmd.ExtraFiles, os.NewFile(uintptr(fifoFd), fifoName))
cmd.Env = append(cmd.Env,
"_LIBCONTAINER_FIFOFD="+strconv.Itoa(stdioFdCount+len(cmd.ExtraFiles)-1))
return nil
}
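// newParentProcess constructs the init or setns helper process along with
// the socket pair used for bootstrap data and the pipe used to forward the
// child's logs.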
func (c *linuxContainer) newParentProcess(p *Process) (parentProcess, error) {
parentInitPipe, childInitPipe, err := utils.NewSockPair("init")
if err != nil {
return nil, newSystemErrorWithCause(err, "creating new init pipe")
}
messageSockPair := filePair{parentInitPipe, childInitPipe}
parentLogPipe, childLogPipe, err := os.Pipe()
if err != nil {
return nil, fmt.Errorf("Unable to create the log pipe: %s", err)
}
logFilePair := filePair{parentLogPipe, childLogPipe}
cmd := c.commandTemplate(p, childInitPipe, childLogPipe)
if !p.Init {
return c.newSetnsProcess(p, cmd, messageSockPair, logFilePair)
}
// We only set up fifoFd if we're not doing a `runc exec`. The historic
// reason for this is that previously we would pass a dirfd that allowed
// for container rootfs escape (and not doing it in `runc exec` avoided
// that problem), but we no longer do that. However, there's no need to do
// this for `runc exec` so we just keep it this way to be safe.
if err := c.includeExecFifo(cmd); err != nil {
return nil, newSystemErrorWithCause(err, "including execfifo in cmd.Exec setup")
}
return c.newInitProcess(p, cmd, messageSockPair, logFilePair)
}
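// commandTemplate builds the exec.Cmd for the container init binary, wiring
// up stdio, extra files, and the _LIBCONTAINER_* environment variables that
// tell init which inherited fds to use.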
func (c *linuxContainer) commandTemplate(p *Process, childInitPipe *os.File, childLogPipe *os.File) *exec.Cmd {
cmd := exec.Command(c.initPath, c.initArgs[1:]...)
cmd.Args[0] = c.initArgs[0]
cmd.Stdin = p.Stdin
cmd.Stdout = p.Stdout
cmd.Stderr = p.Stderr
cmd.Dir = c.config.Rootfs
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &unix.SysProcAttr{}
}
cmd.Env = append(cmd.Env, "GOMAXPROCS="+os.Getenv("GOMAXPROCS"))
cmd.ExtraFiles = append(cmd.ExtraFiles, p.ExtraFiles...)
if p.ConsoleSocket != nil {
cmd.ExtraFiles = append(cmd.ExtraFiles, p.ConsoleSocket)
cmd.Env = append(cmd.Env,
"_LIBCONTAINER_CONSOLE="+strconv.Itoa(stdioFdCount+len(cmd.ExtraFiles)-1),
)
}
cmd.ExtraFiles = append(cmd.ExtraFiles, childInitPipe)
cmd.Env = append(cmd.Env,
"_LIBCONTAINER_INITPIPE="+strconv.Itoa(stdioFdCount+len(cmd.ExtraFiles)-1),
"_LIBCONTAINER_STATEDIR="+c.root,
)
cmd.ExtraFiles = append(cmd.ExtraFiles, childLogPipe)
cmd.Env = append(cmd.Env,
"_LIBCONTAINER_LOGPIPE="+strconv.Itoa(stdioFdCount+len(cmd.ExtraFiles)-1),
"_LIBCONTAINER_LOGLEVEL="+p.LogLevel,
)
// NOTE: when running a container with no PID namespace and the parent process spawning the container is
// PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason
// even with the parent still running.
if c.config.ParentDeathSignal > 0 {
cmd.SysProcAttr.Pdeathsig = unix.Signal(c.config.ParentDeathSignal)
}
return cmd
}
func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*initProcess, error) {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard))
nsMaps := make(map[configs.NamespaceType]string)
for _, ns := range c.config.Namespaces {
if ns.Path != "" {
nsMaps[ns.Type] = ns.Path
}
}
_, sharePidns := nsMaps[configs.NEWPID]
data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps)
if err != nil {
return nil, err
}
init := &initProcess{
cmd: cmd,
messageSockPair: messageSockPair,
logFilePair: logFilePair,
manager: c.cgroupManager,
intelRdtManager: c.intelRdtManager,
config: c.newInitConfig(p),
container: c,
process: p,
bootstrapData: data,
sharePidns: sharePidns,
}
c.initProcess = init
return init, nil
}
func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*setnsProcess, error) {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns))
state, err := c.currentState()
if err != nil {
return nil, newSystemErrorWithCause(err, "getting container's current state")
}
// for setns process, we don't have to set cloneflags as the process namespaces
// will only be set via setns syscall
data, err := c.bootstrapData(0, state.NamespacePaths)
if err != nil {
return nil, err
}
return &setnsProcess{
cmd: cmd,
cgroupPaths: state.CgroupPaths,
rootlessCgroups: c.config.RootlessCgroups,
intelRdtPath: state.IntelRdtPath,
messageSockPair: messageSockPair,
logFilePair: logFilePair,
manager: c.cgroupManager,
config: c.newInitConfig(p),
process: p,
bootstrapData: data,
initProcessPid: state.InitProcessPid,
}, nil
}
func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
cfg := &initConfig{
Config: c.config,
Args: process.Args,
Env: process.Env,
User: process.User,
AdditionalGroups: process.AdditionalGroups,
Cwd: process.Cwd,
Capabilities: process.Capabilities,
PassedFilesCount: len(process.ExtraFiles),
ContainerId: c.ID(),
NoNewPrivileges: c.config.NoNewPrivileges,
RootlessEUID: c.config.RootlessEUID,
RootlessCgroups: c.config.RootlessCgroups,
AppArmorProfile: c.config.AppArmorProfile,
ProcessLabel: c.config.ProcessLabel,
Rlimits: c.config.Rlimits,
}
if process.NoNewPrivileges != nil {
cfg.NoNewPrivileges = *process.NoNewPrivileges
}
if process.AppArmorProfile != "" {
cfg.AppArmorProfile = process.AppArmorProfile
}
if process.Label != "" {
cfg.ProcessLabel = process.Label
}
if len(process.Rlimits) > 0 {
cfg.Rlimits = process.Rlimits
}
cfg.CreateConsole = process.ConsoleSocket != nil
cfg.ConsoleWidth = process.ConsoleWidth
cfg.ConsoleHeight = process.ConsoleHeight
return cfg
}
func (c *linuxContainer) Destroy() error {
c.m.Lock()
defer c.m.Unlock()
return c.state.destroy()
}
func (c *linuxContainer) Pause() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
switch status {
case Running, Created:
if err := c.cgroupManager.Freeze(configs.Frozen); err != nil {
return err
}
return c.state.transition(&pausedState{
c: c,
})
}
return newGenericError(fmt.Errorf("container not running or created: %s", status), ContainerNotRunning)
}
func (c *linuxContainer) Resume() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status != Paused {
return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused)
}
if err := c.cgroupManager.Freeze(configs.Thawed); err != nil {
return err
}
return c.state.transition(&runningState{
c: c,
})
}
func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) {
// XXX(cyphar): This requires cgroups.
if c.config.RootlessCgroups {
logrus.Warn("getting OOM notifications may fail if you don't have the full access to cgroups")
}
path := c.cgroupManager.Path("memory")
if cgroups.IsCgroup2UnifiedMode() {
return notifyOnOOMV2(path)
}
return notifyOnOOM(path)
}
func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) {
// XXX(cyphar): This requires cgroups.
if c.config.RootlessCgroups {
logrus.Warn("getting memory pressure notifications may fail if you don't have the full access to cgroups")
}
return notifyMemoryPressure(c.cgroupManager.Path("memory"), level)
}
var criuFeatures *criurpc.CriuFeatures
func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc.CriuOpts, criuFeat *criurpc.CriuFeatures) error {
t := criurpc.CriuReqType_FEATURE_CHECK
// make sure the features we are looking for are really not from
// some previous check
criuFeatures = nil
req := &criurpc.CriuReq{
Type: &t,
// Theoretically this should not be necessary but CRIU
// segfaults if Opts is empty.
// Fixed in CRIU 2.12
Opts: rpcOpts,
Features: criuFeat,
}
err := c.criuSwrk(nil, req, criuOpts, nil)
if err != nil {
logrus.Debugf("%s", err)
return errors.New("CRIU feature check failed")
}
logrus.Debugf("Feature check says: %s", criuFeatures)
missingFeatures := false
// The outer if checks if the fields actually exist
if (criuFeat.MemTrack != nil) &&
(criuFeatures.MemTrack != nil) {
// The inner if checks if they are set to true
if *criuFeat.MemTrack && !*criuFeatures.MemTrack {
missingFeatures = true
logrus.Debugf("CRIU does not support MemTrack")
}
}
// This needs to be repeated for every new feature check.
	// Is there a way to put this in a function? Reflection?
if (criuFeat.LazyPages != nil) &&
(criuFeatures.LazyPages != nil) {
if *criuFeat.LazyPages && !*criuFeatures.LazyPages {
missingFeatures = true
logrus.Debugf("CRIU does not support LazyPages")
}
}
if missingFeatures {
return errors.New("CRIU is missing features")
}
return nil
}
func compareCriuVersion(criuVersion int, minVersion int) error {
// simple function to perform the actual version compare
if criuVersion < minVersion {
return fmt.Errorf("CRIU version %d must be %d or higher", criuVersion, minVersion)
}
return nil
}
// checkCriuVersion checks Criu version greater than or equal to minVersion
func (c *linuxContainer) checkCriuVersion(minVersion int) error {
// If the version of criu has already been determined there is no need
// to ask criu for the version again. Use the value from c.criuVersion.
if c.criuVersion != 0 {
return compareCriuVersion(c.criuVersion, minVersion)
}
criu := criu.MakeCriu()
criu.SetCriuPath(c.criuPath)
var err error
c.criuVersion, err = criu.GetCriuVersion()
if err != nil {
return fmt.Errorf("CRIU version check failed: %s", err)
}
return compareCriuVersion(c.criuVersion, minVersion)
}
const descriptorsFilename = "descriptors.json"
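// addCriuDumpMount registers m as an external bind mount for the CRIU dump,
// keyed by its destination path relative to the container rootfs.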
func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) {
mountDest := strings.TrimPrefix(m.Destination, c.config.Rootfs)
extMnt := &criurpc.ExtMountMap{
Key: proto.String(mountDest),
Val: proto.String(mountDest),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
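// addMaskPaths maps each masked non-directory path to /dev/null so that CRIU
// treats those bind mounts as external during dump.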
func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error {
for _, path := range c.config.MaskPaths {
fi, err := os.Stat(fmt.Sprintf("/proc/%d/root/%s", c.initProcess.pid(), path))
if err != nil {
if os.IsNotExist(err) {
continue
}
return err
}
if fi.IsDir() {
continue
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(path),
Val: proto.String("/dev/null"),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
return nil
}
func (c *linuxContainer) handleCriuConfigurationFile(rpcOpts *criurpc.CriuOpts) {
// CRIU will evaluate a configuration starting with release 3.11.
// Settings in the configuration file will overwrite RPC settings.
// Look for annotations. The annotation 'org.criu.config'
// specifies if CRIU should use a different, container specific
// configuration file.
_, annotations := utils.Annotations(c.config.Labels)
configFile, exists := annotations["org.criu.config"]
if exists {
// If the annotation 'org.criu.config' exists and is set
// to a non-empty string, tell CRIU to use that as a
// configuration file. If the file does not exist, CRIU
// will just ignore it.
if configFile != "" {
rpcOpts.ConfigFile = proto.String(configFile)
}
// If 'org.criu.config' exists and is set to an empty
// string, a runc specific CRIU configuration file will
// be not set at all.
} else {
// If the mentioned annotation has not been found, specify
// a default CRIU configuration file.
rpcOpts.ConfigFile = proto.String("/etc/criu/runc.conf")
}
}
func (c *linuxContainer) criuSupportsExtNS(t configs.NamespaceType) bool {
var minVersion int
switch t {
case configs.NEWNET:
		// CRIU gained support for different external namespace types with different CRIU releases.
// For network namespaces to work we need at least criu 3.11.0 => 31100.
minVersion = 31100
case configs.NEWPID:
// For PID namespaces criu 31500 is needed.
minVersion = 31500
default:
return false
}
return c.checkCriuVersion(minVersion) == nil
}
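// criuNsToKey builds the key CRIU uses to identify an external namespace,
// e.g. criuNsToKey(configs.NEWNET) yields "extRootNetNS".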
func criuNsToKey(t configs.NamespaceType) string {
return "extRoot" + strings.Title(configs.NsName(t)) + "NS"
}
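// handleCheckpointingExternalNamespaces marks the container's namespace of
// type t as external to CRIU when a path to it is configured, so CRIU skips
// dumping it and expects it to exist at restore time.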
func (c *linuxContainer) handleCheckpointingExternalNamespaces(rpcOpts *criurpc.CriuOpts, t configs.NamespaceType) error {
if !c.criuSupportsExtNS(t) {
return nil
}
nsPath := c.config.Namespaces.PathOf(t)
if nsPath == "" {
return nil
}
// CRIU expects the information about an external namespace
// like this: --external <TYPE>[<inode>]:<key>
// This <key> is always 'extRoot<TYPE>NS'.
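	// For example, for a network namespace the resulting option is
	// --external net[<inode>]:extRootNetNS, with <inode> taken from the stat below.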
var ns unix.Stat_t
if err := unix.Stat(nsPath, &ns); err != nil {
return err
}
criuExternal := fmt.Sprintf("%s[%d]:%s", configs.NsName(t), ns.Ino, criuNsToKey(t))
rpcOpts.External = append(rpcOpts.External, criuExternal)
return nil
}
func (c *linuxContainer) handleRestoringNamespaces(rpcOpts *criurpc.CriuOpts, extraFiles *[]*os.File) error {
for _, ns := range c.config.Namespaces {
switch ns.Type {
case configs.NEWNET, configs.NEWPID:
// If the container is running in a network or PID namespace and has
// a path to the network or PID namespace configured, we will dump
// that network or PID namespace as an external namespace and we
// will expect that the namespace exists during restore.
// This basically means that CRIU will ignore the namespace
			// and expect it to be set up correctly.
if err := c.handleRestoringExternalNamespaces(rpcOpts, extraFiles, ns.Type); err != nil {
return err
}
default:
// For all other namespaces except NET and PID CRIU has
// a simpler way of joining the existing namespace if set
nsPath := c.config.Namespaces.PathOf(ns.Type)
if nsPath == "" {
continue
}
if ns.Type == configs.NEWCGROUP {
// CRIU has no code to handle NEWCGROUP
return fmt.Errorf("Do not know how to handle namespace %v", ns.Type)
}
// CRIU has code to handle NEWTIME, but it does not seem to be defined in runc
// CRIU will issue a warning for NEWUSER:
// criu/namespaces.c: 'join-ns with user-namespace is not fully tested and dangerous'
rpcOpts.JoinNs = append(rpcOpts.JoinNs, &criurpc.JoinNamespace{
Ns: proto.String(configs.NsName(ns.Type)),
NsFile: proto.String(nsPath),
})
}
}
return nil
}
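// handleRestoringExternalNamespaces opens the configured namespace path and
// hands the file descriptor to CRIU via an inherit-fd entry, so the restored
// container joins the existing namespace instead of creating a new one.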
func (c *linuxContainer) handleRestoringExternalNamespaces(rpcOpts *criurpc.CriuOpts, extraFiles *[]*os.File, t configs.NamespaceType) error {
if !c.criuSupportsExtNS(t) {
return nil
}
nsPath := c.config.Namespaces.PathOf(t)
if nsPath == "" {
return nil
}
// CRIU wants the information about an existing namespace
// like this: --inherit-fd fd[<fd>]:<key>
// The <key> needs to be the same as during checkpointing.
// We are always using 'extRoot<TYPE>NS' as the key in this.
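	// For example: --inherit-fd fd[<fd>]:extRootNetNS for a network namespace.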
nsFd, err := os.Open(nsPath)
if err != nil {
logrus.Errorf("If a specific network namespace is defined it must exist: %s", err)
return fmt.Errorf("Requested network namespace %v does not exist", nsPath)
}
inheritFd := &criurpc.InheritFd{
Key: proto.String(criuNsToKey(t)),
// The offset of four is necessary because 0, 1, 2 and 3 are
// already used by stdin, stdout, stderr, 'criu swrk' socket.
Fd: proto.Int32(int32(4 + len(*extraFiles))),
}
rpcOpts.InheritFd = append(rpcOpts.InheritFd, inheritFd)
// All open FDs need to be transferred to CRIU via extraFiles
*extraFiles = append(*extraFiles, nsFd)
return nil
}
func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error {
c.m.Lock()
defer c.m.Unlock()
// Checkpoint is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS().
// (CLI prints a warning)
// TODO(avagin): Figure out how to make this work nicely. CRIU 2.0 has
// support for doing unprivileged dumps, but the setup of
// rootless containers might make this complicated.
// We are relying on the CRIU version RPC which was introduced with CRIU 3.0.0
if err := c.checkCriuVersion(30000); err != nil {
return err
}
if criuOpts.ImagesDirectory == "" {
return errors.New("invalid directory to save checkpoint")
}
// Since a container can be C/R'ed multiple times,
// the checkpoint directory may already exist.
if err := os.Mkdir(criuOpts.ImagesDirectory, 0700); err != nil && !os.IsExist(err) {
return err
}
if criuOpts.WorkDirectory == "" {
criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
}
if err := os.Mkdir(criuOpts.WorkDirectory, 0700); err != nil && !os.IsExist(err) {
return err
}
workDir, err := os.Open(criuOpts.WorkDirectory)
if err != nil {
return err
}
defer workDir.Close()
imageDir, err := os.Open(criuOpts.ImagesDirectory)
if err != nil {
return err
}
defer imageDir.Close()
rpcOpts := criurpc.CriuOpts{
ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
WorkDirFd: proto.Int32(int32(workDir.Fd())),
LogLevel: proto.Int32(4),
LogFile: proto.String("dump.log"),
Root: proto.String(c.config.Rootfs),
ManageCgroups: proto.Bool(true),
NotifyScripts: proto.Bool(true),
Pid: proto.Int32(int32(c.initProcess.pid())),
ShellJob: proto.Bool(criuOpts.ShellJob),
LeaveRunning: proto.Bool(criuOpts.LeaveRunning),
TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
FileLocks: proto.Bool(criuOpts.FileLocks),
EmptyNs: proto.Uint32(criuOpts.EmptyNs),
OrphanPtsMaster: proto.Bool(true),
AutoDedup: proto.Bool(criuOpts.AutoDedup),
LazyPages: proto.Bool(criuOpts.LazyPages),
}
c.handleCriuConfigurationFile(&rpcOpts)
// If the container is running in a network namespace and has
// a path to the network namespace configured, we will dump
// that network namespace as an external namespace and we
// will expect that the namespace exists during restore.
// This basically means that CRIU will ignore the namespace
	// and expect it to be set up correctly.
if err := c.handleCheckpointingExternalNamespaces(&rpcOpts, configs.NEWNET); err != nil {
return err
}
// Same for possible external PID namespaces
if err := c.handleCheckpointingExternalNamespaces(&rpcOpts, configs.NEWPID); err != nil {
return err
}
// CRIU can use cgroup freezer; when rpcOpts.FreezeCgroup
// is not set, CRIU uses ptrace() to pause the processes.
// Note cgroup v2 freezer is only supported since CRIU release 3.14.
if !cgroups.IsCgroup2UnifiedMode() || c.checkCriuVersion(31400) == nil {
if fcg := c.cgroupManager.Path("freezer"); fcg != "" {
rpcOpts.FreezeCgroup = proto.String(fcg)
}
}
// append optional criu opts, e.g., page-server and port
if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 {
rpcOpts.Ps = &criurpc.CriuPageServerInfo{
Address: proto.String(criuOpts.PageServer.Address),
Port: proto.Int32(criuOpts.PageServer.Port),
}
}
	// pre-dump may need parentImage param to complete iterative migration
if criuOpts.ParentImage != "" {
rpcOpts.ParentImg = proto.String(criuOpts.ParentImage)
rpcOpts.TrackMem = proto.Bool(true)
}
// append optional manage cgroups mode
if criuOpts.ManageCgroupsMode != 0 {
mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
rpcOpts.ManageCgroupsMode = &mode
}
var t criurpc.CriuReqType
if criuOpts.PreDump {
feat := criurpc.CriuFeatures{
MemTrack: proto.Bool(true),
}
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
return err
}
t = criurpc.CriuReqType_PRE_DUMP
} else {
t = criurpc.CriuReqType_DUMP
}
if criuOpts.LazyPages {
// lazy migration requested; check if criu supports it
feat := criurpc.CriuFeatures{
LazyPages: proto.Bool(true),
}
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
return err
}
if fd := criuOpts.StatusFd; fd != -1 {
// check that the FD is valid
flags, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0)
if err != nil {
return fmt.Errorf("invalid --status-fd argument %d: %w", fd, err)
}
// and writable
if flags&unix.O_WRONLY == 0 {
return fmt.Errorf("invalid --status-fd argument %d: not writable", fd)
}
if c.checkCriuVersion(31500) != nil {
// For criu 3.15+, use notifications (see case "status-ready"
// in criuNotifications). Otherwise, rely on criu status fd.
rpcOpts.StatusFd = proto.Int32(int32(fd))
}
}
}
req := &criurpc.CriuReq{
Type: &t,
Opts: &rpcOpts,
}
// no need to dump all this in pre-dump
if !criuOpts.PreDump {
hasCgroupns := c.config.Namespaces.Contains(configs.NEWCGROUP)
for _, m := range c.config.Mounts {
switch m.Device {
case "bind":
c.addCriuDumpMount(req, m)
case "cgroup":
if cgroups.IsCgroup2UnifiedMode() || hasCgroupns {
// real mount(s)
continue
}
// a set of "external" bind mounts
binds, err := getCgroupMounts(m)
if err != nil {
return err
}
for _, b := range binds {
c.addCriuDumpMount(req, b)
}
}
}
if err := c.addMaskPaths(req); err != nil {
return err
}
for _, node := range c.config.Devices {
m := &configs.Mount{Destination: node.Path, Source: node.Path}
c.addCriuDumpMount(req, m)
}
// Write the FD info to a file in the image directory
fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors())
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0600)
if err != nil {
return err
}
}
err = c.criuSwrk(nil, req, criuOpts, nil)
if err != nil {
return err
}
return nil
}
func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) {
mountDest := strings.TrimPrefix(m.Destination, c.config.Rootfs)
extMnt := &criurpc.ExtMountMap{
Key: proto.String(mountDest),
Val: proto.String(m.Source),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
func (c *linuxContainer) restoreNetwork(req *criurpc.CriuReq, criuOpts *CriuOpts) {
for _, iface := range c.config.Networks {
switch iface.Type {
case "veth":
veth := new(criurpc.CriuVethPair)
veth.IfOut = proto.String(iface.HostInterfaceName)
veth.IfIn = proto.String(iface.Name)
req.Opts.Veths = append(req.Opts.Veths, veth)
case "loopback":
// Do nothing
}
}
for _, i := range criuOpts.VethPairs {
veth := new(criurpc.CriuVethPair)
veth.IfOut = proto.String(i.HostInterfaceName)
veth.IfIn = proto.String(i.ContainerInterfaceName)
req.Opts.Veths = append(req.Opts.Veths, veth)
}
}
// makeCriuRestoreMountpoints makes the actual mountpoints for the
// restore using CRIU. This function is inspired from the code in
// rootfs_linux.go
func (c *linuxContainer) makeCriuRestoreMountpoints(m *configs.Mount) error {
switch m.Device {
case "cgroup":
// No mount point(s) need to be created:
//
// * for v1, mount points are saved by CRIU because
// /sys/fs/cgroup is a tmpfs mount
//
// * for v2, /sys/fs/cgroup is a real mount, but
// the mountpoint appears as soon as /sys is mounted
return nil
case "bind":
// The prepareBindMount() function checks if source
// exists. So it cannot be used for other filesystem types.
if err := prepareBindMount(m, c.config.Rootfs); err != nil {
return err
}
default:
// for all other filesystems just create the mountpoints
dest, err := securejoin.SecureJoin(c.config.Rootfs, m.Destination)
if err != nil {
return err
}
if err := checkProcMount(c.config.Rootfs, dest, ""); err != nil {
return err
}
m.Destination = dest
if err := os.MkdirAll(dest, 0755); err != nil {
return err
}
}
return nil
}
// isPathInPrefixList is a small function for CRIU restore to make sure
// mountpoints which are on a tmpfs are not created in the rootfs
func isPathInPrefixList(path string, prefix []string) bool {
for _, p := range prefix {
if strings.HasPrefix(path, p+"/") {
return true
}
}
return false
}
// prepareCriuRestoreMounts tries to set up the rootfs of the
// container to be restored in the same way runc does it for
// initial container creation. Even for a read-only rootfs container
// runc modifies the rootfs to add mountpoints which do not exist.
// This function also creates missing mountpoints as long as they
// are not on top of a tmpfs, as CRIU will restore tmpfs content anyway.
func (c *linuxContainer) prepareCriuRestoreMounts(mounts []*configs.Mount) error {
	// First get a list of all tmpfs mounts
tmpfs := []string{}
for _, m := range mounts {
switch m.Device {
case "tmpfs":
tmpfs = append(tmpfs, m.Destination)
}
}
// Now go through all mounts and create the mountpoints
// if the mountpoints are not on a tmpfs, as CRIU will
// restore the complete tmpfs content from its checkpoint.
for _, m := range mounts {
if !isPathInPrefixList(m.Destination, tmpfs) {
if err := c.makeCriuRestoreMountpoints(m); err != nil {
return err
}
}
}
return nil
}
func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error {
c.m.Lock()
defer c.m.Unlock()
var extraFiles []*os.File
// Restore is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS().
// (CLI prints a warning)
// TODO(avagin): Figure out how to make this work nicely. CRIU doesn't have
// support for unprivileged restore at the moment.
// We are relying on the CRIU version RPC which was introduced with CRIU 3.0.0
if err := c.checkCriuVersion(30000); err != nil {
return err
}
if criuOpts.WorkDirectory == "" {
criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
}
// Since a container can be C/R'ed multiple times,
// the work directory may already exist.
if err := os.Mkdir(criuOpts.WorkDirectory, 0700); err != nil && !os.IsExist(err) {
return err
}
workDir, err := os.Open(criuOpts.WorkDirectory)
if err != nil {
return err
}
defer workDir.Close()
if criuOpts.ImagesDirectory == "" {
return errors.New("invalid directory to restore checkpoint")
}
imageDir, err := os.Open(criuOpts.ImagesDirectory)
if err != nil {
return err
}
defer imageDir.Close()
// CRIU has a few requirements for a root directory:
// * it must be a mount point
// * its parent must not be overmounted
// c.config.Rootfs is bind-mounted to a temporary directory
// to satisfy these requirements.
root := filepath.Join(c.root, "criu-root")
if err := os.Mkdir(root, 0755); err != nil {
return err
}
defer os.Remove(root)
root, err = filepath.EvalSymlinks(root)
if err != nil {
return err
}
err = unix.Mount(c.config.Rootfs, root, "", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
return err
}
defer unix.Unmount(root, unix.MNT_DETACH)
t := criurpc.CriuReqType_RESTORE
req := &criurpc.CriuReq{
Type: &t,
Opts: &criurpc.CriuOpts{
ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
WorkDirFd: proto.Int32(int32(workDir.Fd())),
EvasiveDevices: proto.Bool(true),
LogLevel: proto.Int32(4),
LogFile: proto.String("restore.log"),
RstSibling: proto.Bool(true),
Root: proto.String(root),
ManageCgroups: proto.Bool(true),
NotifyScripts: proto.Bool(true),
ShellJob: proto.Bool(criuOpts.ShellJob),
ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
FileLocks: proto.Bool(criuOpts.FileLocks),
EmptyNs: proto.Uint32(criuOpts.EmptyNs),
OrphanPtsMaster: proto.Bool(true),
AutoDedup: proto.Bool(criuOpts.AutoDedup),
LazyPages: proto.Bool(criuOpts.LazyPages),
},
}
c.handleCriuConfigurationFile(req.Opts)
if err := c.handleRestoringNamespaces(req.Opts, &extraFiles); err != nil {
return err
}
// This will modify the rootfs of the container in the same way runc
// modifies the container during initial creation.
if err := c.prepareCriuRestoreMounts(c.config.Mounts); err != nil {
return err
}
hasCgroupns := c.config.Namespaces.Contains(configs.NEWCGROUP)
for _, m := range c.config.Mounts {
switch m.Device {
case "bind":
c.addCriuRestoreMount(req, m)
case "cgroup":
if cgroups.IsCgroup2UnifiedMode() || hasCgroupns {
continue
}
// cgroup v1 is a set of bind mounts, unless cgroupns is used
binds, err := getCgroupMounts(m)
if err != nil {
return err
}
for _, b := range binds {
c.addCriuRestoreMount(req, b)
}
}
}
if len(c.config.MaskPaths) > 0 {
m := &configs.Mount{Destination: "/dev/null", Source: "/dev/null"}
c.addCriuRestoreMount(req, m)
}
for _, node := range c.config.Devices {
m := &configs.Mount{Destination: node.Path, Source: node.Path}
c.addCriuRestoreMount(req, m)
}
if criuOpts.EmptyNs&unix.CLONE_NEWNET == 0 {
c.restoreNetwork(req, criuOpts)
}
// append optional manage cgroups mode
if criuOpts.ManageCgroupsMode != 0 {
mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
req.Opts.ManageCgroupsMode = &mode
}
var (
fds []string
fdJSON []byte
)
if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil {
return err
}
if err := json.Unmarshal(fdJSON, &fds); err != nil {
return err
}
for i := range fds {
if s := fds[i]; strings.Contains(s, "pipe:") {
inheritFd := new(criurpc.InheritFd)
inheritFd.Key = proto.String(s)
inheritFd.Fd = proto.Int32(int32(i))
req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
}
}
err = c.criuSwrk(process, req, criuOpts, extraFiles)
// Now that CRIU is done let's close all opened FDs CRIU needed.
for _, fd := range extraFiles {
fd.Close()
}
return err
}
func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error {
// need to apply cgroups only on restore
if req.GetType() != criurpc.CriuReqType_RESTORE {
return nil
}
// XXX: Do we need to deal with this case? AFAIK criu still requires root.
if err := c.cgroupManager.Apply(pid); err != nil {
return err
}
if err := c.cgroupManager.Set(c.config); err != nil {
return newSystemError(err)
}
if cgroups.IsCgroup2UnifiedMode() {
return nil
}
// the stuff below is cgroupv1-specific
path := fmt.Sprintf("/proc/%d/cgroup", pid)
cgroupsPaths, err := cgroups.ParseCgroupFile(path)
if err != nil {
return err
}
for c, p := range cgroupsPaths {
cgroupRoot := &criurpc.CgroupRoot{
Ctrl: proto.String(c),
Path: proto.String(p),
}
req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot)
}
return nil
}
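// criuSwrk launches "criu swrk" connected via a socketpair and drives the
// protobuf RPC exchange: it writes req, then reads responses in a loop,
// acknowledging NOTIFY callbacks until the final answer arrives.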
func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, extraFiles []*os.File) error {
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
if err != nil {
return err
}
var logPath string
if opts != nil {
logPath = filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile())
} else {
// For the VERSION RPC 'opts' is set to 'nil' and therefore
// opts.WorkDirectory does not exist. Set logPath to "".
logPath = ""
}
criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client")
criuClientFileCon, err := net.FileConn(criuClient)
criuClient.Close()
if err != nil {
return err
}
criuClientCon := criuClientFileCon.(*net.UnixConn)
defer criuClientCon.Close()
criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server")
defer criuServer.Close()
args := []string{"swrk", "3"}
if c.criuVersion != 0 {
// If the CRIU Version is still '0' then this is probably
// the initial CRIU run to detect the version. Skip it.
logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath)
}
logrus.Debugf("Using CRIU with following args: %s", args)
cmd := exec.Command(c.criuPath, args...)
if process != nil {
cmd.Stdin = process.Stdin
cmd.Stdout = process.Stdout
cmd.Stderr = process.Stderr
}
cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer)
if extraFiles != nil {
cmd.ExtraFiles = append(cmd.ExtraFiles, extraFiles...)
}
if err := cmd.Start(); err != nil {
return err
}
// we close criuServer so that even if CRIU crashes or unexpectedly exits, runc will not hang.
criuServer.Close()
// cmd.Process will be replaced by a restored init.
criuProcess := cmd.Process
var criuProcessState *os.ProcessState
defer func() {
if criuProcessState == nil {
criuClientCon.Close()
_, err := criuProcess.Wait()
if err != nil {
logrus.Warnf("wait on criuProcess returned %v", err)
}
}
}()
if err := c.criuApplyCgroups(criuProcess.Pid, req); err != nil {
return err
}
var extFds []string
if process != nil {
extFds, err = getPipeFds(criuProcess.Pid)
if err != nil {
return err
}
}
logrus.Debugf("Using CRIU in %s mode", req.GetType().String())
// In the case of criurpc.CriuReqType_FEATURE_CHECK req.GetOpts()
// should be empty. For older CRIU versions it still will be
// available but empty. criurpc.CriuReqType_VERSION actually
// has no req.GetOpts().
if !(req.GetType() == criurpc.CriuReqType_FEATURE_CHECK ||
req.GetType() == criurpc.CriuReqType_VERSION) {
val := reflect.ValueOf(req.GetOpts())
v := reflect.Indirect(val)
for i := 0; i < v.NumField(); i++ {
st := v.Type()
name := st.Field(i).Name
if strings.HasPrefix(name, "XXX_") {
continue
}
value := val.MethodByName("Get" + name).Call([]reflect.Value{})
logrus.Debugf("CRIU option %s with value %v", name, value[0])
}
}
data, err := proto.Marshal(req)
if err != nil {
return err
}
_, err = criuClientCon.Write(data)
if err != nil {
return err
}
buf := make([]byte, 10*4096)
oob := make([]byte, 4096)
for {
n, oobn, _, _, err := criuClientCon.ReadMsgUnix(buf, oob)
if req.Opts != nil && req.Opts.StatusFd != nil {
// Close status_fd as soon as we got something back from criu,
// assuming it has consumed (reopened) it by this time.
			// Otherwise it might be left open forever and whoever
// is waiting on it will wait forever.
fd := int(*req.Opts.StatusFd)
_ = unix.Close(fd)
req.Opts.StatusFd = nil
}
if err != nil {
return err
}
if n == 0 {
return errors.New("unexpected EOF")
}
if n == len(buf) {
return errors.New("buffer is too small")
}
resp := new(criurpc.CriuResp)
err = proto.Unmarshal(buf[:n], resp)
if err != nil {
return err
}
if !resp.GetSuccess() {
typeString := req.GetType().String()
return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath)
}
t := resp.GetType()
switch {
case t == criurpc.CriuReqType_FEATURE_CHECK:
logrus.Debugf("Feature check says: %s", resp)
criuFeatures = resp.GetFeatures()
case t == criurpc.CriuReqType_NOTIFY:
if err := c.criuNotifications(resp, process, cmd, opts, extFds, oob[:oobn]); err != nil {
return err
}
t = criurpc.CriuReqType_NOTIFY
req = &criurpc.CriuReq{
Type: &t,
NotifySuccess: proto.Bool(true),
}
data, err = proto.Marshal(req)
if err != nil {
return err
}
_, err = criuClientCon.Write(data)
if err != nil {
return err
}
continue
case t == criurpc.CriuReqType_RESTORE:
case t == criurpc.CriuReqType_DUMP:
case t == criurpc.CriuReqType_PRE_DUMP:
default:
return fmt.Errorf("unable to parse the response %s", resp.String())
}
break
}
criuClientCon.CloseWrite()
	// cmd.Wait() waits for cmd's goroutines, which are used for proxying file descriptors.
	// Here we want to wait only for the CRIU process.
criuProcessState, err = criuProcess.Wait()
if err != nil {
return err
}
// In pre-dump mode CRIU is in a loop and waits for
// the final DUMP command.
	// The current runc pre-dump approach, however, is to
	// start criu in PRE_DUMP once for a single pre-dump
	// and not for the whole series of pre-dump, pre-dump, ..., dump.
// If we got the message CriuReqType_PRE_DUMP it means
// CRIU was successful and we need to forcefully stop CRIU
if !criuProcessState.Success() && *req.Type != criurpc.CriuReqType_PRE_DUMP {
return fmt.Errorf("criu failed: %s\nlog file: %s", criuProcessState.String(), logPath)
}
return nil
}
// block any external network activity
func lockNetwork(config *configs.Config) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err := strategy.detach(config); err != nil {
return err
}
}
return nil
}
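// re-enable external network activity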
func unlockNetwork(config *configs.Config) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err = strategy.attach(config); err != nil {
return err
}
}
return nil
}
func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, cmd *exec.Cmd, opts *CriuOpts, fds []string, oob []byte) error {
notify := resp.GetNotify()
if notify == nil {
return fmt.Errorf("invalid response: %s", resp.String())
}
script := notify.GetScript()
logrus.Debugf("notify: %s\n", script)
switch script {
case "post-dump":
f, err := os.Create(filepath.Join(c.root, "checkpoint"))
if err != nil {
return err
}
f.Close()
case "network-unlock":
if err := unlockNetwork(c.config); err != nil {
return err
}
case "network-lock":
if err := lockNetwork(c.config); err != nil {
return err
}
case "setup-namespaces":
if c.config.Hooks != nil {
s, err := c.currentOCIState()
if err != nil {
				return err
}
s.Pid = int(notify.GetPid())
if err := c.config.Hooks[configs.Prestart].RunHooks(s); err != nil {
return err
}
if err := c.config.Hooks[configs.CreateRuntime].RunHooks(s); err != nil {
return err
}
}
case "post-restore":
pid := notify.GetPid()
p, err := os.FindProcess(int(pid))
if err != nil {
return err
}
cmd.Process = p
r, err := newRestoredProcess(cmd, fds)
if err != nil {
return err
}
process.ops = r
if err := c.state.transition(&restoredState{
imageDir: opts.ImagesDirectory,
c: c,
}); err != nil {
return err
}
// create a timestamp indicating when the restored checkpoint was started
c.created = time.Now().UTC()
if _, err := c.updateState(r); err != nil {
return err
}
if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil {
if !os.IsNotExist(err) {
logrus.Error(err)
}
}
case "orphan-pts-master":
scm, err := unix.ParseSocketControlMessage(oob)
if err != nil {
return err
}
fds, err := unix.ParseUnixRights(&scm[0])
if err != nil {
return err
}
master := os.NewFile(uintptr(fds[0]), "orphan-pts-master")
defer master.Close()
// While we can access console.master, using the API is a good idea.
if err := utils.SendFd(process.ConsoleSocket, master.Name(), master.Fd()); err != nil {
return err
}
case "status-ready":
if opts.StatusFd != -1 {
// write \0 to status fd to notify that lazy page server is ready
_, err := unix.Write(opts.StatusFd, []byte{0})
if err != nil {
logrus.Warnf("can't write \\0 to status fd: %v", err)
}
_ = unix.Close(opts.StatusFd)
opts.StatusFd = -1
}
}
return nil
}
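// updateState records process as the init process (when non-nil) and
// persists the container's current state to the state file.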
func (c *linuxContainer) updateState(process parentProcess) (*State, error) {
if process != nil {
c.initProcess = process
}
state, err := c.currentState()
if err != nil {
return nil, err
}
err = c.saveState(state)
if err != nil {
return nil, err
}
return state, nil
}
func (c *linuxContainer) saveState(s *State) (retErr error) {
tmpFile, err := ioutil.TempFile(c.root, "state-")
if err != nil {
return err
}
defer func() {
if retErr != nil {
tmpFile.Close()
os.Remove(tmpFile.Name())
}
}()
err = utils.WriteJSON(tmpFile, s)
if err != nil {
return err
}
err = tmpFile.Close()
if err != nil {
return err
}
stateFilePath := filepath.Join(c.root, stateFilename)
return os.Rename(tmpFile.Name(), stateFilePath)
}
func (c *linuxContainer) currentStatus() (Status, error) {
if err := c.refreshState(); err != nil {
return -1, err
}
return c.state.status(), nil
}
// refreshState needs to be called to verify that the current state of the
// container is accurate. Because consumers of libcontainer can use it
// out of process, we need to verify the container's status based on runtime
// information rather than relying on our in-process info.
func (c *linuxContainer) refreshState() error {
paused, err := c.isPaused()
if err != nil {
return err
}
if paused {
return c.state.transition(&pausedState{c: c})
}
t := c.runType()
switch t {
case Created:
return c.state.transition(&createdState{c: c})
case Running:
return c.state.transition(&runningState{c: c})
}
return c.state.transition(&stoppedState{c: c})
}
func (c *linuxContainer) runType() Status {
if c.initProcess == nil {
return Stopped
}
pid := c.initProcess.pid()
stat, err := system.Stat(pid)
if err != nil {
return Stopped
}
if stat.StartTime != c.initProcessStartTime || stat.State == system.Zombie || stat.State == system.Dead {
return Stopped
}
// We'll create the exec fifo and block on it after the container is created,
// and delete it after the container starts.
if _, err := os.Stat(filepath.Join(c.root, execFifoFilename)); err == nil {
return Created
}
return Running
}
func (c *linuxContainer) isPaused() (bool, error) {
state, err := c.cgroupManager.GetFreezerState()
if err != nil {
return false, err
}
return state == configs.Frozen, nil
}
func (c *linuxContainer) currentState() (*State, error) {
var (
startTime uint64
externalDescriptors []string
pid = -1
)
if c.initProcess != nil {
pid = c.initProcess.pid()
startTime, _ = c.initProcess.startTime()
externalDescriptors = c.initProcess.externalDescriptors()
}
intelRdtPath, err := intelrdt.GetIntelRdtPath(c.ID())
if err != nil {
intelRdtPath = ""
}
state := &State{
BaseState: BaseState{
ID: c.ID(),
Config: *c.config,
InitProcessPid: pid,
InitProcessStartTime: startTime,
Created: c.created,
},
Rootless: c.config.RootlessEUID && c.config.RootlessCgroups,
CgroupPaths: c.cgroupManager.GetPaths(),
IntelRdtPath: intelRdtPath,
NamespacePaths: make(map[configs.NamespaceType]string),
ExternalDescriptors: externalDescriptors,
}
if pid > 0 {
for _, ns := range c.config.Namespaces {
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
for _, nsType := range configs.NamespaceTypes() {
if !configs.IsNamespaceSupported(nsType) {
continue
}
if _, ok := state.NamespacePaths[nsType]; !ok {
ns := configs.Namespace{Type: nsType}
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
}
}
return state, nil
}
func (c *linuxContainer) currentOCIState() (*specs.State, error) {
bundle, annotations := utils.Annotations(c.config.Labels)
state := &specs.State{
Version: specs.Version,
ID: c.ID(),
Bundle: bundle,
Annotations: annotations,
}
status, err := c.currentStatus()
if err != nil {
return nil, err
}
state.Status = specs.ContainerState(status.String())
if status != Stopped {
if c.initProcess != nil {
state.Pid = c.initProcess.pid()
}
}
return state, nil
}
// orderNamespacePaths sorts namespace paths into a list of paths that we
// can setns in order.
func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) {
paths := []string{}
for _, ns := range configs.NamespaceTypes() {
// Remove namespaces that we don't need to join.
if !c.config.Namespaces.Contains(ns) {
continue
}
if p, ok := namespaces[ns]; ok && p != "" {
// check if the requested namespace is supported
if !configs.IsNamespaceSupported(ns) {
return nil, newSystemError(fmt.Errorf("namespace %s is not supported", ns))
}
// only set to join this namespace if it exists
if _, err := os.Lstat(p); err != nil {
return nil, newSystemErrorWithCausef(err, "running lstat on namespace path %q", p)
}
// do not allow namespace path with comma as we use it to separate
// the namespace paths
if strings.ContainsRune(p, ',') {
return nil, newSystemError(fmt.Errorf("invalid path %s", p))
}
paths = append(paths, fmt.Sprintf("%s:%s", configs.NsName(ns), p))
}
}
return paths, nil
}
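// For illustration (assumed values): joining an existing network namespace
// of pid 1234 yields an entry like "net:/proc/1234/ns/net", taking "net" to
// be the configs.NsName for the network namespace. bootstrapData below joins
// these entries with commas, which is why paths containing a comma are
// rejected above.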
func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) {
data := bytes.NewBuffer(nil)
for _, im := range idMap {
line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size)
if _, err := data.WriteString(line); err != nil {
return nil, err
}
}
return data.Bytes(), nil
}
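// For illustration (assumed mapping): configs.IDMap{ContainerID: 0,
// HostID: 1000, Size: 65536} encodes as the line "0 1000 65536\n",
// i.e. the same whitespace-separated triple format the kernel accepts
// in /proc/<pid>/uid_map and /proc/<pid>/gid_map.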
// bootstrapData encodes the necessary data in netlink binary format
// as an io.Reader.
// Consumers can write the data to a bootstrap program
// such as one that uses the nsenter package to bootstrap the container's
// init process correctly, i.e. with the correct namespaces, uid/gid
// mappings etc.
func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string) (io.Reader, error) {
// create the netlink message
r := nl.NewNetlinkRequest(int(InitMsg), 0)
// write cloneFlags
r.AddData(&Int32msg{
Type: CloneFlagsAttr,
Value: uint32(cloneFlags),
})
// write custom namespace paths
if len(nsMaps) > 0 {
nsPaths, err := c.orderNamespacePaths(nsMaps)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: NsPathsAttr,
Value: []byte(strings.Join(nsPaths, ",")),
})
}
// write uid/gid mappings only when we are not joining an existing user ns
_, joinExistingUser := nsMaps[configs.NEWUSER]
if !joinExistingUser {
// write uid mappings
if len(c.config.UidMappings) > 0 {
if c.config.RootlessEUID && c.newuidmapPath != "" {
r.AddData(&Bytemsg{
Type: UidmapPathAttr,
Value: []byte(c.newuidmapPath),
})
}
b, err := encodeIDMapping(c.config.UidMappings)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: UidmapAttr,
Value: b,
})
}
// write gid mappings
if len(c.config.GidMappings) > 0 {
b, err := encodeIDMapping(c.config.GidMappings)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: GidmapAttr,
Value: b,
})
if c.config.RootlessEUID && c.newgidmapPath != "" {
r.AddData(&Bytemsg{
Type: GidmapPathAttr,
Value: []byte(c.newgidmapPath),
})
}
if requiresRootOrMappingTool(c.config) {
r.AddData(&Boolmsg{
Type: SetgroupAttr,
Value: true,
})
}
}
}
if c.config.OomScoreAdj != nil {
// write oom_score_adj
r.AddData(&Bytemsg{
Type: OomScoreAdjAttr,
Value: []byte(strconv.Itoa(*c.config.OomScoreAdj)),
})
}
// write rootless
r.AddData(&Boolmsg{
Type: RootlessEUIDAttr,
Value: c.config.RootlessEUID,
})
return bytes.NewReader(r.Serialize()), nil
}
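// A minimal consumer sketch (assumed usage; initPipe is a hypothetical
// *os.File for the pipe shared with the bootstrap process):
//
//   data, err := c.bootstrapData(cloneFlags, nsMaps)
//   if err != nil {
//       return err
//   }
//   if _, err := io.Copy(initPipe, data); err != nil {
//       return err
//   }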
// ignoreTerminateErrors returns nil if the given err matches an error known
// to indicate that the terminate occurred successfully or err was nil, otherwise
// err is returned unaltered.
func ignoreTerminateErrors(err error) error {
if err == nil {
return nil
}
// terminate() might return an error from either Kill or Wait.
// The (*Cmd).Wait documentation says: "If the command fails to run
// or doesn't complete successfully, the error is of type *ExitError".
// Filter out such errors (like "exit status 1" or "signal: killed").
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
return nil
}
// TODO: use errors.Is(err, os.ErrProcessDone) here and
// remove "process already finished" string comparison below
// once go 1.16 is minimally supported version.
s := err.Error()
if strings.Contains(s, "process already finished") ||
strings.Contains(s, "Wait was already called") {
return nil
}
return err
}
func requiresRootOrMappingTool(c *configs.Config) bool {
gidMap := []configs.IDMap{
{ContainerID: 0, HostID: os.Getegid(), Size: 1},
}
return !reflect.DeepEqual(c.GidMappings, gidMap)
}
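// Note on requiresRootOrMappingTool above: it returns false only when the
// gid mappings consist of the single identity mapping of the caller's own
// egid; presumably any other mapping needs root privileges or a setgid
// mapping helper, which is why bootstrapData sends SetgroupAttr in that case.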
| 1 | 22,592 | First determine if the namespace already exists, so it's clearer that it's handling namespaces that aren't included in the `c.config.Namespaces` | opencontainers-runc | go |
@@ -1,7 +1,5 @@
-<section class="assets-wrapper">
- <div class="assets">
- <%= render "videos/download_link", download_type_key: "OriginalFile", download_type: "original", size_display: "Original (720p)", clip: clip %>
- <%= render "videos/download_link", download_type_key: "IphoneVideoFile", download_type: "iphone", size_display: "iPhone", clip: clip %>
- <%= render "videos/download_link", download_type_key: "HdMp4VideoFile", download_type: "hd_mp4", size_display: "HD MP4", clip: clip %>
- </div>
+<section class="download-links">
+ <%= render "videos/download_link", download_type_key: "OriginalFile", download_type: "original", size_display: "Original (720p)", clip: clip %>
+ <%= render "videos/download_link", download_type_key: "IphoneVideoFile", download_type: "iphone", size_display: "iPhone", clip: clip %>
+ <%= render "videos/download_link", download_type_key: "HdMp4VideoFile", download_type: "hd_mp4", size_display: "HD MP4", clip: clip %>
</section> | 1 | <section class="assets-wrapper">
<div class="assets">
<%= render "videos/download_link", download_type_key: "OriginalFile", download_type: "original", size_display: "Original (720p)", clip: clip %>
<%= render "videos/download_link", download_type_key: "IphoneVideoFile", download_type: "iphone", size_display: "iPhone", clip: clip %>
<%= render "videos/download_link", download_type_key: "HdMp4VideoFile", download_type: "hd_mp4", size_display: "HD MP4", clip: clip %>
</div>
</section>
| 1 | 17,162 | What changed in the styles that means we don't need this? | thoughtbot-upcase | rb |
@@ -275,13 +275,13 @@ namespace Microsoft.DotNet.Build.Tasks
}
}
- private JToken GetFrameworkDependenciesSection(JObject projectJsonRoot, string framework = null)
+ private JObject GetFrameworkDependenciesSection(JObject projectJsonRoot, string framework = null)
{
if(string.IsNullOrWhiteSpace(framework))
{
- return projectJsonRoot["dependencies"];
+ return (JObject) projectJsonRoot["dependencies"];
}
- return projectJsonRoot.SelectToken("frameworks." + NewtonsoftEscapeJProperty(framework) + ".dependencies");
+ return (JObject) projectJsonRoot["frameworks"][framework]["dependencies"];
}
// Generate the combines dependencies from the projectjson jObject and from AdditionalDependencies | 1 | using Microsoft.Build.Utilities;
using Microsoft.Build.Framework;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.RegularExpressions;
using NuGet.Packaging;
using NuGet.Packaging.Core;
using NuGet.Versioning;
namespace Microsoft.DotNet.Build.Tasks
{
/// <summary>
    /// Parse a project.json, and add additional dependencies, then write out a new project.json.
/// Use-case scenarios
/// 1. Provide a list of package drops, this becomes the source of package versions
/// 2. Provide a versions files, this becomes the source of package versions
    /// If both a package drop and a version file are provided, then the package drop takes precedence over the version file.
/// </summary>
public class AddDependenciesToProjectJson : Task
{
        // Additional dependencies to add to the project.json. May optionally contain a version.
        // Will override dependencies present in the project if there is a conflict.
// AdditionalDependencies required metadata: Name, Version
[Required]
public ITaskItem[] AdditionalDependencies { get; set; }
// Framework section which the additional dependencies apply to. Empty is the default dependencies section.
public string[] Frameworks { get; set; }
public string[] PackagesDrops { get; set; }
[Required]
public string PackageNameRegex { get; set; }
public string[] VersionsFiles { get; set; }
/// <summary>
        /// If there are multiple package items from different sources (i.e., package items found in one or more package drops,
/// package items found in one or more versions files) with the same package name, allow the conflict, but choose
/// the newest package version.
/// </summary>
public bool UseNewestAvailablePackages { get; set; }
/// <summary>
/// Original package version which is used to seed the output project.json
/// </summary>
[Required]
public string ProjectJson { get; set; }
/// <summary>
/// External package dependency versions.
/// </summary>
public ITaskItem[] ExternalPackages { get; set; }
/// <summary>
/// Optional list of RIDs to exclude from the generated project.json.
/// </summary>
public string[] ExcludedRuntimes { get; set; }
// The directory to put the generated project.json in
[Required]
public string OutputProjectJson { get; set; }
private Regex _packageNameRegex;
private VersionComparer comparer = new VersionComparer(VersionComparison.VersionRelease);
public override bool Execute()
{
if (!File.Exists(ProjectJson))
{
Log.LogError("Cannot find specified project.json - '{0}'", ProjectJson);
return false;
}
Dictionary<string, PackageItem> packageInformation = new Dictionary<string, PackageItem>();
_packageNameRegex = new Regex(PackageNameRegex);
// Retrieve package information from a package drop location
if (PackagesDrops != null &&
PackagesDrops.Length > 0)
{
AddPackageItemsToDictionary(ref packageInformation, GatherPackageInformationFromDrops(PackagesDrops));
}
// Retrieve package information from a versions file
if (VersionsFiles != null)
{
foreach (var versionsFile in VersionsFiles)
{
if (!File.Exists(versionsFile))
{
Log.LogError("Version file {0} does not exist.", versionsFile);
}
AddPackageItemsToDictionary(ref packageInformation, GatherPackageInformationFromVersionsFile(versionsFile, comparer));
}
}
JObject projectRoot = ReadProject(ProjectJson);
var invalidFramework = AreValidFrameworkPaths(projectRoot);
if(invalidFramework != string.Empty)
{
OutputProjectJson = ProjectJson;
Log.LogError("Unable to find framework section '{0}' in '{1}'", invalidFramework, ProjectJson);
return false;
}
// No Frameworks were specified, apply AdditionalDependencies to all framework groups in the project.json
if (Frameworks == null || Frameworks.Length == 0)
{
Frameworks = projectRoot.SelectTokens("frameworks").SelectMany(f => f.Children().Select(c => ((JProperty)c).Name)).ToArray();
}
// Update default dependencies section
JObject dependencies = GenerateDependencies(projectRoot, ExternalPackages, packageInformation);
projectRoot = UpdateDependenciesProperty(projectRoot, dependencies);
if (ExcludedRuntimes != null)
{
var excludedRIDs = new HashSet<string>(ExcludedRuntimes, StringComparer.OrdinalIgnoreCase);
projectRoot = FilterRuntimes(projectRoot, excludedRIDs);
}
// Update framework dependencies sections
for (int i = 0; i < Frameworks.Length; i++)
{
dependencies = GenerateDependencies(projectRoot, ExternalPackages, packageInformation, Frameworks[i]);
projectRoot = UpdateDependenciesProperty(projectRoot, dependencies, Frameworks[i]);
}
WriteProject(projectRoot, OutputProjectJson);
return true;
}
/// <summary>
/// Given a package name regex pattern, and an array of drop locations, create an array of objects
        /// containing package information (name, version, prerelease version)
/// </summary>
/// <param name="packagesDrops"></param>
/// <returns></returns>
private Dictionary<string, PackageItem> GatherPackageInformationFromDrops(string [] packagesDrops)
{
Dictionary<string, PackageItem> packageItems = new Dictionary<string, PackageItem>();
foreach (string packageDrop in packagesDrops)
{
if (!Directory.Exists(packageDrop))
{
Log.LogWarning("PackageDrop does not exist - '{0}'", packageDrop);
continue;
}
IEnumerable<string> packages = Directory.GetFiles(packageDrop);
foreach (var package in packages)
{
PackageItem packageItem = CreatePackageItem(package);
AddPackageItemToDictionary(packageItems, packageItem);
}
}
return packageItems;
}
private void AddPackageItemToDictionary(Dictionary<string, PackageItem> packageItems, PackageItem packageItem)
{
if (packageItems.ContainsKey(packageItem.Name))
{
if (comparer == null)
{
comparer = new VersionComparer(VersionComparison.VersionRelease);
}
if (comparer.Compare(packageItems[packageItem.Name].Version, packageItem.Version) != 0 && UseNewestAvailablePackages != true)
{
Log.LogError("Package named {0} already exists. Cannot have multiple packages with the same name.\n", packageItem.Name);
Log.LogError("To permit package name clashes and take latest, specify 'UseNewestAvailablePackages=true'.\n");
Log.LogError("Package {0} version {1} clashes with {2}", packageItem.Name, packageItems[packageItem.Name].Version.ToFullString(), packageItem.Version.ToFullString());
}
else if (UseNewestAvailablePackages == true)
{
PackageItem item = (comparer.Compare(packageItems[packageItem.Name].Version, packageItem.Version) < 0) ? packageItem : packageItems[packageItem.Name];
packageItems[packageItem.Name] = item;
}
}
else
{
packageItems.Add(packageItem.Name, packageItem);
}
}
private void AddPackageItemsToDictionary(ref Dictionary<string, PackageItem> packageItems, Dictionary<string, PackageItem> addPackageItems)
{
foreach(var packageItem in addPackageItems.Values)
{
AddPackageItemToDictionary(packageItems, packageItem);
}
}
// A versions file is of the form https://github.com/dotnet/versions/blob/master/build-info/dotnet/corefx/master/Latest_Packages.txt
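        // For illustration (hypothetical content): each non-empty line is of
        // the form "<PackageId> <Version>", e.g. "System.Runtime 4.3.0", which
        // the parser below splits on the space into a package name and version.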
private Dictionary<string, PackageItem> GatherPackageInformationFromVersionsFile(string versionsFile, VersionComparer comparer = null)
{
Dictionary<string, PackageItem> packageItems = new Dictionary<string, PackageItem>();
if (!File.Exists(versionsFile))
{
Log.LogError("Specified versions file ({0}) does not exist.", versionsFile);
}
var lines = File.ReadAllLines(versionsFile);
foreach(string line in lines)
{
if(!string.IsNullOrWhiteSpace(line))
{
string [] packageVersionTokens = line.Split(' ');
PackageItem packageItem = CreatePackageItem(packageVersionTokens[0], packageVersionTokens[1]);
AddPackageItemToDictionary(packageItems, packageItem);
}
}
return packageItems;
}
/// <summary>
/// Create a package item object from a nupkg file
/// </summary>
/// <param name="package">path to a nupkg</param>
/// <returns></returns>
private PackageItem CreatePackageItem(string package)
{
using (PackageArchiveReader archiveReader = new PackageArchiveReader(package))
{
PackageIdentity identity = archiveReader.GetIdentity();
return new PackageItem(identity.Id, identity.Version);
}
}
/// <summary>
/// Create a package item object from a package name (id) and version
/// </summary>
/// <param name="id"></param>
/// <param name="version"></param>
/// <returns></returns>
private PackageItem CreatePackageItem(string id, string version)
{
NuGetVersion nuGetVersion = new NuGetVersion(version);
return new PackageItem(id, nuGetVersion);
}
private string AreValidFrameworkPaths(JObject projectRoot)
{
if(Frameworks == null ||
Frameworks.Length == 0)
{
return string.Empty;
}
// Check for a valid path, if invalid, exit
for (int i = 0; i < Frameworks.Length; i++)
{
var _frameworkPath = "frameworks." + NewtonsoftEscapeJProperty(Frameworks[i]);
var validFramework = projectRoot.SelectToken(_frameworkPath);
if (validFramework == null)
{
return _frameworkPath;
}
}
return string.Empty;
}
private static JObject ReadProject(string projectJsonPath)
{
using (TextReader projectFileReader = File.OpenText(projectJsonPath))
{
var projectJsonReader = new JsonTextReader(projectFileReader);
var serializer = new JsonSerializer();
return serializer.Deserialize<JObject>(projectJsonReader);
}
}
private JToken GetFrameworkDependenciesSection(JObject projectJsonRoot, string framework = null)
{
if(string.IsNullOrWhiteSpace(framework))
{
return projectJsonRoot["dependencies"];
}
return projectJsonRoot.SelectToken("frameworks." + NewtonsoftEscapeJProperty(framework) + ".dependencies");
}
        // Generate the combined dependencies from the project.json JObject and from AdditionalDependencies
private JObject GenerateDependencies(JObject projectJsonRoot, ITaskItem[] externalPackageVersions, Dictionary<string, PackageItem> packageInformation, string framework = null)
{
var originalDependenciesList = new List<JToken>();
var returnDependenciesList = new Dictionary<string, JToken>();
var frameworkDependencies = GetFrameworkDependenciesSection(projectJsonRoot, framework);
if (frameworkDependencies != null)
{
originalDependenciesList = frameworkDependencies.Children().ToList();
// Update versions in dependencies
foreach (JProperty property in originalDependenciesList.Select(od => od))
{
PackageItem packageItem = null;
if (packageInformation.ContainsKey(property.Name))
{
packageItem = packageInformation[property.Name];
NuGetVersion nuGetVersion = packageItem.Version;
// Only add the original dependency if it wasn't passed as an AdditionalDependency, ie. AdditionalDependencies may override dependencies in project.json
if (!AdditionalDependencies.Any(d => d.ItemSpec.Equals(property.Name, StringComparison.OrdinalIgnoreCase)))
{
JProperty addProperty;
if (nuGetVersion != null)
{
addProperty = new JProperty(property.Name, nuGetVersion.ToString());
}
else
{
addProperty = property;
}
returnDependenciesList.Add(property.Name, addProperty);
}
}
else
{
returnDependenciesList.Add(property.Name, property);
}
}
}
foreach (var dependency in AdditionalDependencies)
{
string name = dependency.GetMetadata("Name");
// Don't add a new dependency if one already exists.
if (!returnDependenciesList.ContainsKey(name))
{
NuGetVersion nuGetVersion = NuGetVersion.Parse(dependency.GetMetadata("Version"));
PackageItem packageItem = new PackageItem(name, nuGetVersion);
string version = packageItem.GetVersionString();
// a package version was provided, use its version information.
if (packageInformation.ContainsKey(name))
{
version = packageInformation[name].Version.ToString();
}
JProperty property = new JProperty(name, version);
returnDependenciesList.Add(name, property);
}
else
{
Log.LogMessage("Ignoring AdditionalDependency '{0}', dependency is already present in {1}", name, ProjectJson);
}
}
return new JObject(returnDependenciesList.Values.ToArray());
}
        /* Given a project.json as a JObject, replace its dependencies property with a new dependencies property. */
private JObject UpdateDependenciesProperty(JObject projectJsonRoot, JObject updatedProperties, string framework = null)
{
var frameworkPath = string.Empty;
if(!string.IsNullOrWhiteSpace(framework))
{
frameworkPath = "frameworks." + NewtonsoftEscapeJProperty(framework);
}
var frameworkPathObject = projectJsonRoot.SelectToken(frameworkPath);
frameworkPathObject["dependencies"] = updatedProperties;
return projectJsonRoot;
}
private JObject FilterRuntimes(JObject projectRoot, HashSet<string> excludedRIDs)
{
var runtimes = projectRoot["runtimes"];
if (runtimes != null)
{
var toRemove = new List<JToken>();
foreach (JProperty runtime in runtimes)
{
if (excludedRIDs.Contains(runtime.Name))
toRemove.Add(runtime);
}
foreach (var token in toRemove)
{
Log.LogMessage("Removing RID '{0}' from the list of applicable runtimes.", ((JProperty)token).Name);
token.Remove();
}
}
return projectRoot;
}
private static void WriteProject(JObject projectRoot, string projectJsonPath)
{
string projectJson = JsonConvert.SerializeObject(projectRoot, Formatting.Indented) + Environment.NewLine;
if (!File.Exists(projectJsonPath) || !projectJson.Equals(File.ReadAllText(projectJsonPath)))
{
Directory.CreateDirectory(Path.GetDirectoryName(projectJsonPath));
File.WriteAllText(projectJsonPath, projectJson);
}
}
/* JProperties are encapsulated with "['" and "']" to assist with matching Paths which
           contain properties with a '.'. e.g. frameworks.netcoreapp1.0 becomes frameworks.['netcoreapp1.0'].
           A match for a property without a '.' and unencapsulated still works, i.e. we can still select
frameworks.['netcoreapp1.0'] even if internally its path is frameworks.netcoreapp1.0. */
private static string NewtonsoftEscapeJProperty(string property)
{
if (string.IsNullOrWhiteSpace(property))
{
return property;
}
if (!property.StartsWith("['") && !property.EndsWith("']"))
{
property = "['" + property + "']";
}
return property;
}
}
internal class PackageItem
{
public string Name { get; set; }
public NuGetVersion Version
{
set { _version = value; }
get { return _version; }
}
NuGetVersion _version;
public PackageItem() { }
public PackageItem(string name) { Name = name; }
public PackageItem(string name, NuGetVersion version) { Name = name; Version = version; }
public string GetVersionString()
{
return string.Join(".", _version.Major, _version.Minor, _version.Patch);
}
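        // Note: GetVersionString above returns only "Major.Minor.Patch" and
        // drops any prerelease label; ToTaskItem below surfaces the prerelease
        // label separately via the "Prerelease" metadata.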
public TaskItem ToTaskItem()
{
TaskItem taskItem = new TaskItem(Name);
taskItem.SetMetadata("Name", Name);
taskItem.SetMetadata("Version", string.Join(".", Version.Major, Version.Minor, Version.Patch));
taskItem.SetMetadata("Prerelease", Version.Release);
return taskItem;
}
}
}
| 1 | 11,152 | This pattern should be applied to the other instance where NewtonsoftEscapeJProperty is used and you can remove the NewtonsoftEscapeJProperty method. | dotnet-buildtools | .cs |
@@ -165,6 +165,10 @@ class Histogram(Element2D):
vdims = param.List(default=[Dimension('Frequency')], bounds=(1,1))
def __init__(self, values, edges=None, **params):
+ if edges is not None:
+ self.warning("Histogram edges should be supplied as a tuple "
+ "along with the values, passing the edges will "
+ "be deprecated in holoviews 2.0.")
self.values, self.edges, settings = self._process_data(values, edges)
settings.update(params)
super(Histogram, self).__init__((self.values, self.edges), **settings) | 1 | import numpy as np
import param
from ..core import util
from ..core import Dimension, Dataset, Element2D
from .util import compute_edges
class Chart(Dataset, Element2D):
"""
The data held within Chart is a numpy array of shape (N, D),
where N is the number of samples and D the number of dimensions.
Chart Elements are sliceable along up to two key dimensions.
The data may be supplied in one of three formats:
1) As a numpy array of shape (N, D).
2) As a list of length N containing tuples of length D.
3) As a tuple of length D containing iterables of length N.
"""
kdims = param.List(default=[Dimension('x')], bounds=(1,2), doc="""
The key dimensions of the Chart, determining the number of
indexable dimensions.""")
group = param.String(default='Chart', constant=True)
vdims = param.List(default=[Dimension('y')], bounds=(1,None), doc="""
The value dimensions of the Chart, usually corresponding to a
number of dependent variables.""")
def __getitem__(self, index):
sliced = super(Chart, self).__getitem__(index)
if not isinstance(sliced, Chart):
return sliced
if not isinstance(index, tuple): index = (index,)
ndims = len(self.extents)//2
lower_bounds, upper_bounds = [None]*ndims, [None]*ndims
for i, slc in enumerate(index[:ndims]):
if isinstance(slc, slice):
lbound = self.extents[i]
ubound = self.extents[ndims:][i]
lower_bounds[i] = lbound if slc.start is None else slc.start
upper_bounds[i] = ubound if slc.stop is None else slc.stop
sliced.extents = tuple(lower_bounds+upper_bounds)
return sliced
class Scatter(Chart):
"""
Scatter is a Element2D type which gets displayed as a number of
disconnected points.
"""
group = param.String(default='Scatter', constant=True)
class Curve(Chart):
"""
Curve is a simple Chart Element providing 1D indexing along
the x-axis.
"""
group = param.String(default='Curve', constant=True)
class ErrorBars(Chart):
"""
ErrorBars is a Chart Element type representing any number of
errorbars situated in a 2D space. The errors must be supplied
as an Nx3 or Nx4 array representing the x/y-positions and
either the symmetric error or asymmetric errors respectively.
"""
group = param.String(default='ErrorBars', constant=True, doc="""
A string describing the quantity measured by the ErrorBars
object.""")
kdims = param.List(default=[Dimension('x')],
bounds=(1, 2), constant=True, doc="""
The Dimensions corresponding to the x- and y-positions of
the error bars.""")
vdims = param.List(default=[Dimension('y'), Dimension('yerror')],
bounds=(1, 3), constant=True)
def range(self, dim, data_range=True):
didx = self.get_dimension_index(dim)
dim = self.get_dimension(dim)
if didx == 1 and data_range and len(self):
mean = self.dimension_values(1)
neg_error = self.dimension_values(2)
if len(self.dimensions()) > 3:
pos_error = self.dimension_values(3)
else:
pos_error = neg_error
lower = np.nanmin(mean-neg_error)
upper = np.nanmax(mean+pos_error)
return util.dimension_range(lower, upper, dim)
return super(ErrorBars, self).range(dim, data_range)
class Spread(ErrorBars):
"""
Spread is a Chart Element type representing a spread of
values as given by a mean and standard error or confidence
intervals. Just like the ErrorBars Element type, mean and
deviations from the mean should be supplied as either an
Nx3 or Nx4 array representing the x-values, mean values
    and symmetric or asymmetric errors respectively. Internally
the data is always expanded to an Nx4 array.
"""
group = param.String(default='Spread', constant=True)
class Bars(Chart):
"""
Bars is an Element type, representing a number of stacked and
grouped bars, depending the dimensionality of the key and value
dimensions. Bars is useful for categorical data, which may be
laid via groups, categories and stacks.
"""
group = param.String(default='Bars', constant=True)
kdims = param.List(default=[Dimension('x')], bounds=(1,3))
vdims = param.List(default=[Dimension('y')], bounds=(1, None))
class BoxWhisker(Chart):
"""
BoxWhisker represent data as a distributions highlighting
the median, mean and various percentiles.
"""
group = param.String(default='BoxWhisker', constant=True)
kdims = param.List(default=[], bounds=(0,None))
vdims = param.List(default=[Dimension('y')], bounds=(1,1))
_auto_indexable_1d = False
class Histogram(Element2D):
"""
Histogram contains a number of bins, which are defined by the
upper and lower bounds of their edges and the computed bin values.
"""
kdims = param.List(default=[Dimension('x')], bounds=(1,1), doc="""
Dimensions on Element2Ds determine the number of indexable
dimensions.""")
group = param.String(default='Histogram', constant=True)
vdims = param.List(default=[Dimension('Frequency')], bounds=(1,1))
def __init__(self, values, edges=None, **params):
self.values, self.edges, settings = self._process_data(values, edges)
settings.update(params)
super(Histogram, self).__init__((self.values, self.edges), **settings)
def __getitem__(self, key):
"""
Implements slicing or indexing of the Histogram
"""
if key in self.dimensions(): return self.dimension_values(key)
        if key == () or key is Ellipsis: return self # May no longer be necessary
key = util.process_ellipses(self, key)
if not isinstance(key, tuple): pass
elif len(key) == self.ndims + 1:
if key[-1] != slice(None) and (key[-1] not in self.vdims):
raise KeyError("%r is the only selectable value dimension" %
self.vdims[0].name)
key = key[0]
elif len(key) == self.ndims + 1: key = key[0]
else:
raise KeyError("Histogram cannot slice more than %d dimension."
% len(self.kdims)+1)
centers = [(float(l)+r)/2 for (l,r) in zip(self.edges, self.edges[1:])]
if isinstance(key, slice):
start, stop = key.start, key.stop
if [start, stop] == [None,None]: return self
start_idx, stop_idx = None,None
if start is not None:
start_idx = np.digitize([start], centers, right=True)[0]
if stop is not None:
stop_idx = np.digitize([stop], centers, right=True)[0]
slice_end = stop_idx+1 if stop_idx is not None else None
slice_values = self.values[start_idx:stop_idx]
slice_edges = self.edges[start_idx: slice_end]
extents = (min(slice_edges), self.extents[1],
max(slice_edges), self.extents[3])
return self.clone((slice_values, slice_edges), extents=extents)
else:
if not (self.edges.min() <= key < self.edges.max()):
raise KeyError("Key value %s is out of the histogram bounds" % key)
idx = np.digitize([key], self.edges)[0]
return self.values[idx-1 if idx>0 else idx]
def _process_data(self, values, edges):
"""
Ensure that edges are specified as left and right edges of the
histogram bins rather than bin centers.
"""
settings = {}
(values, edges) = values if isinstance(values, tuple) else (values, edges)
if isinstance(values, Chart):
settings = dict(values.get_param_values(onlychanged=True))
edges = values.dimension_values(0)
values = values.dimension_values(1)
elif isinstance(values, np.ndarray) and len(values.shape) == 2:
edges = values[:, 0]
values = values[:, 1]
elif all(isinstance(el, tuple) for el in values):
edges, values = zip(*values)
else:
values = np.array(values)
if edges is None:
edges = np.arange(len(values), dtype=np.float)
else:
edges = np.array(edges, dtype=np.float)
if len(edges) == len(values):
edges = compute_edges(edges)
return values, edges, settings
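    # Note on _process_data above: when len(edges) == len(values) the supplied
    # "edges" are taken to be bin centers, and compute_edges (assumed to return
    # N+1 edge positions for N centers) converts them to proper bin edges.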
def range(self, dimension, data_range=True):
if self.get_dimension_index(dimension) == 0 and data_range:
dim = self.get_dimension(dimension)
lower, upper = np.min(self.edges), np.max(self.edges)
return util.dimension_range(lower, upper, dim)
else:
return super(Histogram, self).range(dimension, data_range)
def dimension_values(self, dim):
dim = self.get_dimension(dim, strict=True).name
if dim in self.vdims:
return self.values
elif dim in self.kdims:
return np.convolve(self.edges, np.ones((2,))/2, mode='valid')
else:
return super(Histogram, self).dimension_values(dim)
def sample(self, samples=[], **sample_values):
raise NotImplementedError('Cannot sample a Histogram.')
def reduce(self, dimensions=None, function=None, **reduce_map):
raise NotImplementedError('Reduction of Histogram not implemented.')
class Points(Chart):
"""
Allows sets of points to be positioned over a sheet coordinate
    system. Each point may optionally be associated with a chosen
numeric value.
    The input data can be an Nx2 or Nx3 Numpy array where the first two
    columns correspond to the X,Y coordinates in sheet coordinates,
within the declared bounding region. For Nx3 arrays, the third
column corresponds to the magnitude values of the points. Any
additional columns will be ignored (use VectorFields instead).
    The input data may also be passed as a tuple of elements that
may be numpy arrays or values that can be cast to arrays. When
such a tuple is supplied, the elements are joined column-wise into
a single array, allowing the magnitudes to be easily supplied
separately.
Note that if magnitudes are to be rendered correctly by default,
they should lie in the range [0,1].
"""
kdims = param.List(default=[Dimension('x'), Dimension('y')],
bounds=(2, 2), constant=True, doc="""
The label of the x- and y-dimension of the Points in form
of a string or dimension object.""")
group = param.String(default='Points', constant=True)
vdims = param.List(default=[])
_min_dims = 2 # Minimum number of columns
class VectorField(Points):
"""
    A VectorField is a collection of vectors where each
vector has an associated position in sheet coordinates.
The constructor of VectorField is similar to the constructor of
Points: the input data can be an NxM Numpy array where the first
    two columns correspond to the X,Y coordinates in sheet
coordinates, within the declared bounding region. As with Points,
the input can be a tuple of array objects or of objects that can
be cast to arrays (the tuple elements are joined column-wise).
The third column maps to the vector angle which must be specified
in radians. Note that it is possible to supply a collection which
isn't a numpy array, whereby each element of the collection is
assumed to be an iterable corresponding to a single column of the
NxM array.
The visualization of any additional columns is decided by the
plotting code. For instance, the fourth and fifth columns could
correspond to arrow length and colour map value. All that is
    assumed is that these additional dimensions are normalized between
0.0 and 1.0 for the default visualization to work well.
The only restriction is that the final data array is NxM where
M>3. In other words, the vector must have a dimensionality of 2 or
higher.
"""
group = param.String(default='VectorField', constant=True)
vdims = param.List(default=[Dimension('Angle', cyclic=True, range=(0,2*np.pi)),
Dimension('Magnitude')], bounds=(1, None))
_null_value = np.array([[], [], [], []]).T # For when data is None
_min_dims = 3 # Minimum number of columns
def __init__(self, data, **params):
if isinstance(data, list) and all(isinstance(d, np.ndarray) for d in data):
data = np.column_stack([d.flat if d.ndim > 1 else d for d in data])
super(VectorField, self).__init__(data, **params)
class Spikes(Chart):
"""
Spikes is a 1D or 2D Element, which represents a series of
vertical or horizontal lines distributed along some dimension. If
an additional dimension is supplied it will be used to specify the
height of the lines. The Element may therefore be used to
represent 1D distributions, spectrograms or spike trains in
electrophysiology.
"""
group = param.String(default='Spikes', constant=True)
kdims = param.List(default=[Dimension('x')], bounds=(1, 1))
vdims = param.List(default=[])
_auto_indexable_1d = False
class Area(Curve):
"""
An Area Element represents the area under a Curve
and is specified in the same format as a regular
Curve, with the key dimension corresponding to a
column of x-values and the value dimension
corresponding to a column of y-values. Optionally
a second value dimension may be supplied to shade
the region between the curves.
"""
group = param.String(default='Area', constant=True)
@classmethod
def stack(cls, areas):
"""
Stacks an (Nd)Overlay of Area or Curve Elements by offsetting
their baselines. To stack a HoloMap or DynamicMap use the map
method.
"""
baseline = np.zeros(len(areas.get(0)))
stacked = areas.clone(shared_data=False)
vdims = [areas.get(0).vdims[0], 'Baseline']
for k, area in areas.items():
x, y = (area.dimension_values(i) for i in range(2))
stacked[k] = area.clone((x, y+baseline, baseline), vdims=vdims,
new_type=Area)
baseline += y
return stacked
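    # For illustration (assumed usage): given an overlay of Areas, e.g.
    #   stacked = Area.stack(area1 * area2)
    # each successive Area is offset by the running sum of the previous
    # y-values, with the offset recorded in the added 'Baseline' vdim.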
| 1 | 18,849 | Something to mention in the next changelog/release notes. It will be good to get histogram working consistently with everything else. | holoviz-holoviews | py |
@@ -892,6 +892,7 @@ spec:
values:
- {{ $targetAffinityVal }}
topologyKey: kubernetes.io/hostname
+ namespaces: [{{.Volume.runNamespace}}]
{{- end }}
containers:
- args: | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO
// Rename this file by removing the version suffix information
package v1alpha1
const jivaVolumeYamls = `
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: jiva-volume-read-default
spec:
defaultConfig:
- name: OpenEBSNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- jiva-volume-isvalidversion-default
- jiva-volume-podsinopenebsns-default
- jiva-volume-read-listtargetservice-default
- jiva-volume-read-listtargetpod-default
- jiva-volume-read-listreplicapod-default
- jiva-volume-read-verifyreplicationfactor-default
- jiva-volume-read-patchreplicadeployment-default
output: jiva-volume-read-output-default
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: jiva-volume-create-default
spec:
defaultConfig:
- name: OpenEBSNamespace
value: {{env "OPENEBS_NAMESPACE"}}
# The value will be filled by the installer.
  # Administrators can choose to deploy the jiva pods
  # in the openebs namespace instead of the target namespace
# for the following reasons:
# - avoid granting access to hostpath in user namespace
# - manage all the storage pods in a single namespace
# By default, this is set to false to retain
  # backward compatibility. However, in future releases
# if more and more deployments prefer to use this option,
# the default can be set to deploy in openebs.
- name: DeployInOpenEBSNamespace
enabled: "true"
- name: ControllerImage
value: {{env "OPENEBS_IO_JIVA_CONTROLLER_IMAGE" | default "openebs/jiva:latest"}}
- name: ReplicaImage
value: {{env "OPENEBS_IO_JIVA_REPLICA_IMAGE" | default "openebs/jiva:latest"}}
- name: VolumeMonitorImage
value: {{env "OPENEBS_IO_VOLUME_MONITOR_IMAGE" | default "openebs/m-exporter:latest"}}
- name: ReplicaCount
value: {{env "OPENEBS_IO_JIVA_REPLICA_COUNT" | default "3" | quote }}
- name: StoragePool
value: "default"
- name: VolumeMonitor
enabled: "true"
  # TargetTolerations allows you to specify the tolerations for the target
# Example:
# - name: TargetTolerations
# value: |-
# t1:
# key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoSchedule"
# t2:
# key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoExecute"
- name: TargetTolerations
value: "none"
  # ReplicaTolerations allows you to specify the tolerations for the replicas
# Example:
# - name: ReplicaTolerations
# value: |-
# t1:
# key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoSchedule"
# t2:
# key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoExecute"
- name: ReplicaTolerations
value: "none"
- name: EvictionTolerations
value: |-
t1:
effect: NoExecute
key: node.alpha.kubernetes.io/notReady
operator: Exists
t2:
effect: NoExecute
key: node.alpha.kubernetes.io/unreachable
operator: Exists
t3:
effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
t4:
effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
t5:
effect: NoExecute
key: node.kubernetes.io/out-of-disk
operator: Exists
t6:
effect: NoExecute
key: node.kubernetes.io/memory-pressure
operator: Exists
t7:
effect: NoExecute
key: node.kubernetes.io/disk-pressure
operator: Exists
t8:
effect: NoExecute
key: node.kubernetes.io/network-unavailable
operator: Exists
t9:
effect: NoExecute
key: node.kubernetes.io/unschedulable
operator: Exists
t10:
effect: NoExecute
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- name: NodeAffinityRequiredSchedIgnoredExec
value: |-
t1:
key: beta.kubernetes.io/os
operator: In
values:
- linux
- name: NodeAffinityPreferredSchedIgnoredExec
value: |-
t1:
key: some-node-label-key
operator: In
values:
- some-node-label-value
# TargetResourceRequests allow you to specify resource requests that need to be available
# before scheduling the containers. If not specified, the default is to use the limits
# from TargetResourceLimits or the default requests set in the cluster.
- name: TargetResourceRequests
value: "none"
# TargetResourceLimits allow you to set the limits on memory and cpu for jiva
# target pods. The resource and limit value should be in the same format as
# expected by Kubernetes. Example:
#- name: TargetResourceLimits
# value: |-
# memory: 1Gi
# cpu: 200m
# By default, the resource limits are disabled.
- name: TargetResourceLimits
value: "none"
# ReplicaResourceRequests allow you to specify resource requests that need to be available
# before scheduling the containers. If not specified, the default is to use the limits
# from ReplicaResourceLimits or the default requests set in the cluster.
- name: ReplicaResourceRequests
value: "none"
# ReplicaResourceLimits allow you to set the limits on memory and cpu for jiva
# replica pods. The resource and limit value should be in the same format as
  # expected by Kubernetes (see the TargetResourceLimits example above).
- name: ReplicaResourceLimits
value: "none"
# AuxResourceRequests allow you to set requests on side cars. Requests have to be specified
# in the format expected by Kubernetes
- name: AuxResourceRequests
value: "none"
# AuxResourceLimits allow you to set limits on side cars. Limits have to be specified
# in the format expected by Kubernetes
- name: AuxResourceLimits
value: "none"
# ReplicaAntiAffinityTopoKey is used to schedule replica pods
# of a given volume/application, such that they are:
# - not co-located on the same node. (kubernetes.io/hostname)
# - not co-located on the same availability zone.(failure-domain.beta.kubernetes.io/zone)
  # The value for topology key can be anything supported by Kubernetes
  # clusters. It is possible that some clusters might support topology schemes
  # like rack or floor.
#
# Examples:
# kubernetes.io/hostname (default)
# failure-domain.beta.kubernetes.io/zone
# failure-domain.beta.kubernetes.io/region
- name: ReplicaAntiAffinityTopoKey
value: "kubernetes.io/hostname"
# TargetNodeSelector allows you to specify the nodes where
# openebs targets have to be scheduled. To use this feature,
# the nodes should already be labeled with the key=value. For example:
# "kubectl label nodes <node-name> nodetype=storage"
  # Note: It is recommended that the node selector for the target specify
# nodes that have disks/ssds attached to them. Example:
#- name: TargetNodeSelector
# value: |-
# nodetype: storage
- name: TargetNodeSelector
value: "none"
# ReplicaNodeSelector allows you to specify the nodes where
# openebs replicas have to be scheduled. To use this feature,
# the nodes should already be labeled with the key=value. For example:
# "kubectl label nodes <node-name> nodetype=storage"
# Note: It is recommended that node selector for replica specify
# nodes that have disks/ssds attached to them. Example:
#- name: ReplicaNodeSelector
# value: |-
# nodetype: storage
- name: ReplicaNodeSelector
value: "none"
# FSType specifies the format type that Kubernetes should use to
  # mount the Persistent Volume. Note that no validation is
  # done to check the validity of the FSType.
- name: FSType
value: "ext4"
  # Lun specifies the lun number with which Kubernetes should log in
  # to the iSCSI Volume (i.e. the OpenEBS Persistent Volume)
- name: Lun
value: "0"
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- jiva-volume-create-getstorageclass-default
- jiva-volume-create-getpvc-default
- jiva-volume-create-puttargetservice-default
- jiva-volume-create-getstoragepoolcr-default
- jiva-volume-create-putreplicadeployment-default
- jiva-volume-create-puttargetdeployment-default
output: jiva-volume-create-output-default
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: jiva-volume-delete-default
spec:
defaultConfig:
- name: OpenEBSNamespace
value: {{env "OPENEBS_NAMESPACE"}}
- name: ScrubImage
value: {{env "OPENEBS_IO_HELPER_IMAGE" | default "quay.io/openebs/linux-utils:latest"}}
# RetainReplicaData specifies whether jiva replica data folder
# should be cleared or retained.
- name: RetainReplicaData
enabled: "false"
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- jiva-volume-isvalidversion-default
- jiva-volume-podsinopenebsns-default
- jiva-volume-delete-listtargetservice-default
- jiva-volume-delete-listtargetdeployment-default
- jiva-volume-delete-listreplicadeployment-default
- jiva-volume-delete-deletetargetservice-default
- jiva-volume-delete-deletetargetdeployment-default
- jiva-volume-delete-listreplicapod-default
- jiva-volume-delete-deletereplicadeployment-default
- jiva-volume-delete-putreplicascrub-default
output: jiva-volume-delete-output-default
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: jiva-volume-list-default
spec:
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- jiva-volume-list-listtargetservice-default
- jiva-volume-list-listtargetpod-default
- jiva-volume-list-listreplicapod-default
- jiva-volume-list-listpv-default
output: jiva-volume-list-output-default
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-isvalidversion-default
spec:
meta: |
id: is070jivavolume
runNamespace: {{.Volume.runNamespace}}
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: vsm={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "is070jivavolume.name" .TaskResult | noop -}}
{{- .TaskResult.is070jivavolume.name | empty | not | versionMismatchErr "is not a jiva volume of 0.7.0 version" | saveIf "is070jivavolume.versionMismatchErr" .TaskResult | noop -}}
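  # For illustration (one reading of the pipeline above): the first line
  # saves the names of services matching the legacy "vsm=<owner>" label
  # under .TaskResult.is070jivavolume.name; the second derives a
  # versionMismatchErr from whether any such name was found, so that
  # volumes of an incompatible version fail fast.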
---
# Use this generic task in jiva operations like
# read, delete or snapshot to determine if the
# jiva pods were created in the openebs namespace or
# the pvc namespace. This task checks if the service
# is deployed in openebs and saves the result.
# Each of the subsequent run tasks will check this
# saved result to determine whether the operations
# should be performed in the openebs or pvc namespace.
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-podsinopenebsns-default
spec:
meta: |
id: jivapodsinopenebsns
runNamespace: {{ .Config.OpenEBSNamespace.value }}
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/controller-service=jiva-controller-svc,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.namespace}" | trim | saveAs "jivapodsinopenebsns.ns" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-list-listtargetservice-default
spec:
meta: |
{{- $nss := .Volume.runNamespace | default "" | splitList ", " -}}
id: listlistsvc
repeatWith:
metas:
{{- range $k, $ns := $nss }}
- runNamespace: {{ $ns }}
{{- end }}
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/controller-service=jiva-controller-svc
post: |
{{- $servicePairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.namespace}/{@.metadata.labels.openebs\\.io/persistent-volume},clusterIP={@.spec.clusterIP};{end}" | trim | default "" | splitList ";" -}}
{{- $servicePairs | keyMap "volumeList" .ListItems | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-list-listtargetpod-default
spec:
meta: |
{{- $nss := .Volume.runNamespace | default "" | splitList ", " -}}
id: listlistctrl
repeatWith:
metas:
{{- range $k, $ns := $nss }}
- runNamespace: {{ $ns }}
{{- end }}
apiVersion: v1
kind: Pod
action: list
options: |-
labelSelector: openebs.io/controller=jiva-controller
post: |
{{- $controllerPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.namespace}/{@.metadata.labels.openebs\\.io/persistent-volume},controllerIP={@.status.podIP},controllerStatus={@.status.containerStatuses[*].ready};{end}" | trim | default "" | splitList ";" -}}
{{- $controllerPairs | keyMap "volumeList" .ListItems | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-list-listreplicapod-default
spec:
meta: |
{{- $nss := .Volume.runNamespace | default "" | splitList ", " -}}
id: listlistrep
repeatWith:
metas:
{{- range $k, $ns := $nss }}
- runNamespace: {{ $ns }}
{{- end }}
apiVersion: v1
kind: Pod
action: list
options: |-
labelSelector: openebs.io/replica=jiva-replica
post: |
{{- $replicaPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.namespace}/{@.metadata.labels.openebs\\.io/persistent-volume},replicaIP={@.status.podIP},replicaStatus={@.status.containerStatuses[*].ready},capacity={@.metadata.annotations.openebs\\.io/capacity};{end}" | trim | default "" | splitList ";" -}}
{{- $replicaPairs | keyMap "volumeList" .ListItems | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-list-listpv-default
spec:
meta: |
id: listlistpv
apiVersion: v1
kind: PersistentVolume
action: list
options: |-
labelSelector: openebs.io/cas-type=jiva
post: |
{{- $pvPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.name},accessModes={@.spec.accessModes[0]},storageClass={@.spec.storageClassName};{end}" | trim | default "" | splitList ";" -}}
{{- $pvPairs | keyMap "pvList" .ListItems | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-list-output-default
spec:
meta: |
id : listoutput
action: output
kind: CASVolumeList
apiVersion: v1alpha1
task: |
kind: CASVolumeList
items:
{{- $pvList := .ListItems.pvList }}
{{- range $pkey, $map := .ListItems.volumeList }}
{{- $capacity := pluck "capacity" $map | first | default "" | splitList ", " | first }}
{{- $clusterIP := pluck "clusterIP" $map | first }}
{{- $controllerIP := pluck "controllerIP" $map | first }}
{{- $controllerStatus := pluck "controllerStatus" $map | first }}
{{- $replicaIP := pluck "replicaIP" $map | first }}
{{- $replicaStatus := pluck "replicaStatus" $map | first }}
{{- $name := $pkey | splitList "/" | last }}
{{- $ns := $pkey | splitList "/" | first }}
{{- $pvInfo := pluck $name $pvList | first }}
- kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ $name }}
namespace: {{ $ns }}
annotations:
openebs.io/storage-class: {{ $pvInfo.storageClass | default "" }}
vsm.openebs.io/controller-ips: {{ $controllerIP }}
vsm.openebs.io/cluster-ips: {{ $clusterIP }}
vsm.openebs.io/iqn: iqn.2016-09.com.openebs.jiva:{{ $name }}
vsm.openebs.io/replica-count: {{ $replicaIP | default "" | splitList ", " | len }}
vsm.openebs.io/volume-size: {{ $capacity }}
vsm.openebs.io/replica-ips: {{ $replicaIP }}
vsm.openebs.io/replica-status: {{ $replicaStatus | replace "true" "running" | replace "false" "notready" }}
vsm.openebs.io/controller-status: {{ $controllerStatus | replace "true" "running" | replace "false" "notready" | replace " " "," }}
vsm.openebs.io/targetportals: {{ $clusterIP }}:3260
openebs.io/controller-ips: {{ $controllerIP }}
openebs.io/cluster-ips: {{ $clusterIP }}
openebs.io/iqn: iqn.2016-09.com.openebs.jiva:{{ $name }}
openebs.io/replica-count: {{ $replicaIP | default "" | splitList ", " | len }}
openebs.io/volume-size: {{ $capacity }}
openebs.io/replica-ips: {{ $replicaIP }}
openebs.io/replica-status: {{ $replicaStatus | replace "true" "running" | replace "false" "notready" }}
openebs.io/controller-status: {{ $controllerStatus | replace "true" "running" | replace "false" "notready" | replace " " "," }}
openebs.io/targetportals: {{ $clusterIP }}:3260
spec:
accessMode: {{ $pvInfo.accessModes | default "" }}
capacity: {{ $capacity }}
iqn: iqn.2016-09.com.openebs.jiva:{{ $name }}
targetPortal: {{ $clusterIP }}:3260
replicas: {{ .TaskResult.readlistrep.podIP | default "" | splitList " " | len }}
casType: jiva
targetIP: {{ $clusterIP }}
targetPort: 3260
{{- end -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-read-listtargetservice-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: readlistsvc
runNamespace: {{ $jivapodsns }}
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/controller-service=jiva-controller-svc,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistsvc.items" .TaskResult | noop -}}
{{- .TaskResult.readlistsvc.items | notFoundErr "controller service not found" | saveIf "readlistsvc.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.clusterIP}" | trim | saveAs "readlistsvc.clusterIP" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-read-listtargetpod-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: readlistctrl
runNamespace: {{ $jivapodsns }}
apiVersion: v1
kind: Pod
action: list
options: |-
labelSelector: openebs.io/controller=jiva-controller,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistctrl.items" .TaskResult | noop -}}
{{- .TaskResult.readlistctrl.items | notFoundErr "controller pod not found" | saveIf "readlistctrl.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.nodeName}" | trim | saveAs "readlistctrl.targetNodeName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.podIP}" | trim | saveAs "readlistctrl.podIP" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.containerStatuses[*].ready}" | trim | saveAs "readlistctrl.status" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/fs-type}" | trim | default "ext4" | saveAs "readlistctrl.fsType" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/lun}" | trim | default "0" | int | saveAs "readlistctrl.lun" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-read-listreplicapod-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: readlistrep
runNamespace: {{ $jivapodsns }}
apiVersion: v1
kind: Pod
action: list
options: |-
labelSelector: openebs.io/replica=jiva-replica,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistrep.items" .TaskResult | noop -}}
{{- .TaskResult.readlistrep.items | notFoundErr "replica pod(s) not found" | saveIf "readlistrep.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.podIP}" | trim | saveAs "readlistrep.podIP" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.containerStatuses[*].ready}" | trim | saveAs "readlistrep.status" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/capacity}" | trim | saveAs "readlistrep.capacity" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.nodeName}" | trim | saveAs "readlistrep.nodeNames" .TaskResult | noop -}}
{{- .TaskResult.readlistrep.nodeNames | default "" | splitListLen " " | saveAs "readlistrep.noOfReplicas" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-read-verifyreplicationfactor-default
spec:
meta: |
{{ $isPatchValNotEmpty := ne .Volume.isPatchJivaReplicaNodeAffinity "" }}
{{ $isPatchValEnabled := eq .Volume.isPatchJivaReplicaNodeAffinity "enabled" }}
{{ $shouldPatch := and $isPatchValNotEmpty $isPatchValEnabled | toString }}
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: verifyreplicationfactor
runNamespace: {{ $jivapodsns }}
apiVersion: apps/v1
kind: Deployment
action: list
disable: {{ ne $shouldPatch "true" }}
options: |-
labelSelector: openebs.io/replica=jiva-replica,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "verifyreplicationfactor.items" .TaskResult | noop -}}
{{- $errMsg := printf "replica deployment not found" -}}
{{- .TaskResult.verifyreplicationfactor.items | notFoundErr $errMsg | saveIf "verifyreplicationfactor.notFoundErr" .TaskResult | noop -}}
      {{- .TaskResult.verifyreplicationfactor.items | default "" | splitListLen " " | saveAs "verifyreplicationfactor.noOfReplicas" .TaskResult | noop -}}
{{- $expectedRepCount := .TaskResult.verifyreplicationfactor.noOfReplicas | int -}}
      {{- $msg := printf "expected %v replica pod(s), found only %v replica pod(s)" $expectedRepCount .TaskResult.readlistrep.noOfReplicas -}}
{{- .TaskResult.readlistrep.nodeNames | default "" | splitList " " | isLen $expectedRepCount | not | verifyErr $msg | saveIf "verifyreplicationfactor.verifyErr" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-read-patchreplicadeployment-default
spec:
meta: |
{{- $numbers := mkNumberedSlice .Config.ReplicaCount.value -}}
{{ $owner := .Volume.owner }}
{{ $isPatchValNotEmpty := ne .Volume.isPatchJivaReplicaNodeAffinity "" }}
{{ $isPatchValEnabled := eq .Volume.isPatchJivaReplicaNodeAffinity "enabled" }}
{{ $shouldPatch := and $isPatchValNotEmpty $isPatchValEnabled | toString }}
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: readpatchrep
runNamespace: {{ $jivapodsns }}
apiVersion: apps/v1
kind: Deployment
disable: {{ ne $shouldPatch "true" }}
action: patch
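    # repeatWith (below) runs this patch task once per replica deployment name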
repeatWith:
metas:
{{- range $k, $n := $numbers }}
- objectName: {{ $owner }}-rep-{{ $n }}
{{- end }}
task: |
{{- $nodeNames := .TaskResult.readlistrep.nodeNames -}}
type: strategic
pspec: |-
spec:
template:
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
{{- if ne $nodeNames "" }}
{{- $nodeNamesMap := $nodeNames | split " " }}
{{- range $k, $v := $nodeNamesMap }}
{{/* patch the first node from the map in each iteration */}}
{{- if eq $k "_0" }}
- {{ kubeNodeGetHostNameOrNodeName $v }}
{{- end }}
{{- end }}
{{- end }}
post: |
      {{/* Remove the first node from the original space-separated string to
         make sure every deployment is patched with a different node. */}}
{{- removeFirstElement .TaskResult.readlistrep.nodeNames | saveAs "readlistrep.nodeNames" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-read-output-default
spec:
meta: |
id : readoutput
action: output
kind: CASVolume
apiVersion: v1alpha1
task: |
{{- $capacity := .TaskResult.readlistrep.capacity | default "" | splitList " " | first -}}
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
annotations:
vsm.openebs.io/controller-ips: {{ .TaskResult.readlistctrl.podIP | default "" | splitList " " | first }}
vsm.openebs.io/cluster-ips: {{ .TaskResult.readlistsvc.clusterIP }}
vsm.openebs.io/controller-node-name: {{ .TaskResult.readlistctrl.targetNodeName | default "" }}
vsm.openebs.io/iqn: iqn.2016-09.com.openebs.jiva:{{ .Volume.owner }}
vsm.openebs.io/replica-count: {{ .TaskResult.readlistrep.podIP | default "" | splitList " " | len }}
vsm.openebs.io/volume-size: {{ $capacity }}
vsm.openebs.io/replica-ips: {{ .TaskResult.readlistrep.podIP | default "" | splitList " " | join "," }}
vsm.openebs.io/replica-status: {{ .TaskResult.readlistrep.status | default "" | splitList " " | join "," | replace "true" "running" | replace "false" "notready" }}
vsm.openebs.io/controller-status: {{ .TaskResult.readlistctrl.status | default "" | splitList " " | join "," | replace "true" "running" | replace "false" "notready" }}
vsm.openebs.io/targetportals: {{ .TaskResult.readlistsvc.clusterIP }}:3260
openebs.io/controller-ips: {{ .TaskResult.readlistctrl.podIP | default "" | splitList " " | first }}
openebs.io/cluster-ips: {{ .TaskResult.readlistsvc.clusterIP }}
openebs.io/controller-node-name: {{ .TaskResult.readlistctrl.targetNodeName | default "" }}
openebs.io/iqn: iqn.2016-09.com.openebs.jiva:{{ .Volume.owner }}
openebs.io/replica-count: {{ .TaskResult.readlistrep.podIP | default "" | splitList " " | len }}
openebs.io/volume-size: {{ $capacity }}
openebs.io/replica-ips: {{ .TaskResult.readlistrep.podIP | default "" | splitList " " | join "," }}
openebs.io/replica-status: {{ .TaskResult.readlistrep.status | default "" | splitList " " | join "," | replace "true" "running" | replace "false" "notready" }}
openebs.io/controller-status: {{ .TaskResult.readlistctrl.status | default "" | splitList " " | join "," | replace "true" "running" | replace "false" "notready" }}
openebs.io/targetportals: {{ .TaskResult.readlistsvc.clusterIP }}:3260
spec:
capacity: {{ $capacity }}
targetPortal: {{ .TaskResult.readlistsvc.clusterIP }}:3260
iqn: iqn.2016-09.com.openebs.jiva:{{ .Volume.owner }}
replicas: {{ .TaskResult.readlistrep.podIP | default "" | splitList " " | len }}
targetIP: {{ .TaskResult.readlistsvc.clusterIP }}
targetPort: 3260
lun: {{ .TaskResult.readlistctrl.lun }}
fsType: {{ .TaskResult.readlistctrl.fsType }}
casType: jiva
---
# Creating a Target Service is the first operation in
# creating K8s objects for the given PVC. Determine
# the namespace and save it for further create options.
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-create-puttargetservice-default
spec:
meta: |
{{- $deployInOpenEBSNamespace := .Config.DeployInOpenEBSNamespace.enabled | default "true" | lower -}}
id: createputsvc
{{- if eq $deployInOpenEBSNamespace "true" }}
runNamespace: {{ .Config.OpenEBSNamespace.value | trim | saveAs "createputsvc.jivapodsns" .TaskResult }}
{{ else }}
runNamespace: {{ .Volume.runNamespace | trim | saveAs "createputsvc.jivapodsns" .TaskResult }}
{{ end }}
apiVersion: v1
kind: Service
action: put
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "createputsvc.objectName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.spec.clusterIP}" | trim | saveAs "createputsvc.clusterIP" .TaskResult | noop -}}
task: |
apiVersion: v1
    kind: Service
metadata:
annotations:
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
labels:
openebs.io/storage-engine-type: jiva
openebs.io/cas-type: jiva
openebs.io/controller-service: jiva-controller-svc
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
pvc: {{ .Volume.pvc }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
name: {{ .Volume.owner }}-ctrl-svc
spec:
ports:
- name: iscsi
port: 3260
protocol: TCP
targetPort: 3260
- name: api
port: 9501
protocol: TCP
targetPort: 9501
- name: exporter
port: 9500
protocol: TCP
targetPort: 9500
selector:
openebs.io/controller: jiva-controller
openebs.io/persistent-volume: {{ .Volume.owner }}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-create-getstoragepoolcr-default
spec:
meta: |
id: creategetpath
apiVersion: openebs.io/v1alpha1
kind: StoragePool
objectName: {{ .Config.StoragePool.value }}
action: get
post: |
{{- jsonpath .JsonResult "{.spec.path}" | trim | saveAs "creategetpath.storagePoolPath" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-create-getstorageclass-default
spec:
meta: |
id: creategetsc
apiVersion: storage.k8s.io/v1
kind: StorageClass
objectName: {{ .Volume.storageclass }}
action: get
post: |
{{- $resourceVer := jsonpath .JsonResult "{.metadata.resourceVersion}" -}}
{{- trim $resourceVer | saveAs "creategetsc.storageClassVersion" .TaskResult | noop -}}
{{- $stsTargetAffinity := jsonpath .JsonResult "{.metadata.labels.openebs\\.io/sts-target-affinity}" | trim | default "none" -}}
{{- $stsTargetAffinity | saveAs "stsTargetAffinity" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-create-getpvc-default
spec:
meta: |
id: creategetpvc
apiVersion: v1
runNamespace: {{ .Volume.runNamespace }}
kind: PersistentVolumeClaim
objectName: {{ .Volume.pvc }}
action: get
post: |
{{- $replicaAntiAffinity := jsonpath .JsonResult "{.metadata.labels.openebs\\.io/replica-anti-affinity}" | trim | default "none" -}}
{{- $replicaAntiAffinity | saveAs "creategetpvc.replicaAntiAffinity" .TaskResult | noop -}}
{{- $targetAffinity := jsonpath .JsonResult "{.metadata.labels.openebs\\.io/target-affinity}" | trim | default "none" -}}
{{- $targetAffinity | saveAs "creategetpvc.targetAffinity" .TaskResult | noop -}}
{{- $stsTargetAffinity := jsonpath .JsonResult "{.metadata.labels.openebs\\.io/sts-target-affinity}" | trim | default "none" -}}
{{- if ne $stsTargetAffinity "none" -}}
{{- $stsTargetAffinity | saveAs "stsTargetAffinity" .TaskResult | noop -}}
{{- end -}}
{{- if ne .TaskResult.stsTargetAffinity "none" -}}
{{- printf "%s-%s" .TaskResult.stsTargetAffinity ((splitList "-" .Volume.pvc) | last) | default "none" | saveAs "sts.applicationName" .TaskResult -}}
{{- end -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-create-puttargetdeployment-default
spec:
meta: |
id: createputctrl
runNamespace: {{ .TaskResult.createputsvc.jivapodsns }}
apiVersion: apps/v1
kind: Deployment
action: put
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "createputctrl.objectName" .TaskResult | noop -}}
task: |
{{- $isMonitor := .Config.VolumeMonitor.enabled | default "true" | lower -}}
{{- $setResourceRequests := .Config.TargetResourceRequests.value | default "none" -}}
{{- $resourceRequestsVal := fromYaml .Config.TargetResourceRequests.value -}}
{{- $setResourceLimits := .Config.TargetResourceLimits.value | default "none" -}}
{{- $resourceLimitsVal := fromYaml .Config.TargetResourceLimits.value -}}
{{- $setAuxResourceRequests := .Config.AuxResourceRequests.value | default "none" -}}
{{- $auxResourceRequestsVal := fromYaml .Config.AuxResourceRequests.value -}}
{{- $setAuxResourceLimits := .Config.AuxResourceLimits.value | default "none" -}}
{{- $auxResourceLimitsVal := fromYaml .Config.AuxResourceLimits.value -}}
{{- $hasNodeSelector := .Config.TargetNodeSelector.value | default "none" -}}
{{- $nodeSelectorVal := fromYaml .Config.TargetNodeSelector.value -}}
{{- $targetAffinityVal := .TaskResult.creategetpvc.targetAffinity -}}
{{- $hasTargetToleration := .Config.TargetTolerations.value | default "none" -}}
{{- $targetTolerationVal := fromYaml .Config.TargetTolerations.value -}}
apiVersion: apps/v1
    kind: Deployment
metadata:
labels:
{{- if eq $isMonitor "true" }}
monitoring: "volume_exporter_prometheus"
{{- end}}
openebs.io/storage-engine-type: jiva
openebs.io/cas-type: jiva
openebs.io/controller: jiva-controller
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
annotations:
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
{{- if eq $isMonitor "true" }}
openebs.io/volume-monitor: "true"
{{- end}}
openebs.io/volume-type: jiva
openebs.io/fs-type: {{ .Config.FSType.value }}
openebs.io/lun: {{ .Config.Lun.value }}
name: {{ .Volume.owner }}-ctrl
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
openebs.io/controller: jiva-controller
openebs.io/persistent-volume: {{ .Volume.owner }}
template:
metadata:
labels:
{{- if eq $isMonitor "true" }}
monitoring: volume_exporter_prometheus
{{- end}}
openebs.io/controller: jiva-controller
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
openebs.io/version: {{ .CAST.version }}
annotations:
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
openebs.io/fs-type: {{ .Config.FSType.value }}
openebs.io/lun: {{ .Config.Lun.value }}
{{- if eq $isMonitor "true" }}
prometheus.io/path: /metrics
prometheus.io/port: "9500"
prometheus.io/scrape: "true"
{{- end}}
spec:
{{- if ne $hasNodeSelector "none" }}
nodeSelector:
{{- range $sK, $sV := $nodeSelectorVal }}
{{ $sK }}: {{ $sV }}
{{- end }}
{{- end}}
{{- if ne (.TaskResult.sts.applicationName | default "") "" }}
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: statefulset.kubernetes.io/pod-name
operator: In
values:
- {{ .TaskResult.sts.applicationName }}
topologyKey: kubernetes.io/hostname
{{- else if ne $targetAffinityVal "none" }}
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: openebs.io/target-affinity
operator: In
values:
- {{ $targetAffinityVal }}
topologyKey: kubernetes.io/hostname
{{- end }}
containers:
- args:
- controller
- --frontend
- gotgt
- --clusterIP
- {{ .TaskResult.createputsvc.clusterIP }}
- {{ .Volume.owner }}
command:
- launch
image: {{ .Config.ControllerImage.value }}
name: {{ .Volume.owner }}-ctrl-con
resources:
{{- if ne $setResourceLimits "none" }}
limits:
{{- range $rKey, $rLimit := $resourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
{{- if ne $setResourceRequests "none" }}
requests:
{{- range $rKey, $rReq := $resourceRequestsVal }}
{{ $rKey }}: {{ $rReq }}
{{- end }}
{{- end }}
env:
- name: "REPLICATION_FACTOR"
value: {{ .Config.ReplicaCount.value }}
ports:
- containerPort: 3260
protocol: TCP
- containerPort: 9501
protocol: TCP
{{- if eq $isMonitor "true" }}
- args:
- -c=http://127.0.0.1:9501
command:
- maya-exporter
image: {{ .Config.VolumeMonitorImage.value }}
name: maya-volume-exporter
resources:
{{- if ne $setAuxResourceRequests "none" }}
requests:
{{- range $rKey, $rLimit := $auxResourceRequestsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
{{- if ne $setAuxResourceLimits "none" }}
limits:
{{- range $rKey, $rLimit := $auxResourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
ports:
- containerPort: 9500
protocol: TCP
{{- end}}
tolerations:
- effect: NoExecute
key: node.alpha.kubernetes.io/notReady
operator: Exists
tolerationSeconds: 0
- effect: NoExecute
key: node.alpha.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 0
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 0
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 0
{{- if ne $hasTargetToleration "none" }}
{{- range $k, $v := $targetTolerationVal }}
-
{{- range $kk, $vv := $v }}
{{ $kk }}: {{ $vv }}
{{- end }}
{{- end }}
{{- end }}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-create-putreplicadeployment-default
spec:
meta: |
{{- $numbers := mkNumberedSlice .Config.ReplicaCount.value -}}
id: createputrep
runNamespace: {{ .TaskResult.createputsvc.jivapodsns }}
apiVersion: apps/v1
kind: Deployment
action: put
repeatWith:
resources:
{{- range $k, $v := $numbers }}
- {{ $v | quote }}
{{- end }}
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "createputrep.objectName" .TaskResult | noop -}}
task: |
{{- $isEvictionTolerations := .Config.EvictionTolerations.value | default "none" -}}
{{- $evictionTolerationsVal := fromYaml .Config.EvictionTolerations.value -}}
{{- $isCloneEnable := .Volume.isCloneEnable | default "false" -}}
{{- $setResourceRequests := .Config.ReplicaResourceRequests.value | default "none" -}}
{{- $resourceRequestsVal := fromYaml .Config.ReplicaResourceRequests.value -}}
{{- $setResourceLimits := .Config.ReplicaResourceLimits.value | default "none" -}}
{{- $resourceLimitsVal := fromYaml .Config.ReplicaResourceLimits.value -}}
{{- $replicaAntiAffinityVal := .TaskResult.creategetpvc.replicaAntiAffinity -}}
{{- $hasNodeSelector := .Config.ReplicaNodeSelector.value | default "none" -}}
{{- $nodeSelectorVal := fromYaml .Config.ReplicaNodeSelector.value -}}
{{- $hasReplicaToleration := .Config.ReplicaTolerations.value | default "none" -}}
{{- $replicaTolerationVal := fromYaml .Config.ReplicaTolerations.value -}}
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
openebs.io/storage-engine-type: jiva
openebs.io/cas-type: jiva
openebs.io/replica: jiva-replica
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
annotations:
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
openebs.io/capacity: {{ .Volume.capacity }}
openebs.io/storage-pool: {{ .Config.StoragePool.value }}
name: {{ .Volume.owner }}-rep-{{ .ListItems.currentRepeatResource }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
openebs.io/replica: jiva-replica
openebs.io/persistent-volume: {{ .Volume.owner }}
template:
metadata:
labels:
openebs.io/replica: jiva-replica
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
openebs.io/version: {{ .CAST.version }}
{{- if ne $replicaAntiAffinityVal "none" }}
openebs.io/replica-anti-affinity: {{ $replicaAntiAffinityVal }}
{{- end }}
annotations:
openebs.io/storage-class-ref: |
name: {{ .Volume.storageclass }}
resourceVersion: {{ .TaskResult.creategetsc.storageClassVersion }}
openebs.io/capacity: {{ .Volume.capacity }}
openebs.io/storage-pool: {{ .Config.StoragePool.value }}
spec:
{{- if ne $hasNodeSelector "none" }}
nodeSelector:
{{- range $sK, $sV := $nodeSelectorVal }}
{{ $sK }}: {{ $sV }}
{{- end }}
{{- end}}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
openebs.io/replica: jiva-replica
                {{/* If the PVC object has a replica anti-affinity value, use it.
                   This is usually the case for an STS that creates PVCs from a
                   PVC template; an STS can thus have multiple PVs, each with a
                   unique id. To schedule/spread out replicas belonging to
                   different PVs, a unique label associated with the STS should
                   be passed to all the PVCs tied to the STS. */}}
{{- if ne $replicaAntiAffinityVal "none" }}
openebs.io/replica-anti-affinity: {{ $replicaAntiAffinityVal }}
{{- else }}
openebs.io/persistent-volume: {{ .Volume.owner }}
{{- end }}
topologyKey: {{ .Config.ReplicaAntiAffinityTopoKey.value }}
containers:
- args:
- replica
- --frontendIP
- {{ .TaskResult.createputsvc.clusterIP }}
{{- if ne $isCloneEnable "false" }}
- --cloneIP
- {{ .Volume.sourceVolumeTargetIP }}
- --type
- "clone"
- --snapName
- {{ .Volume.snapshotName }}
{{- end }}
- --size
- {{ .Volume.capacity }}
- /openebs
securityContext:
privileged: true
command:
- launch
image: {{ .Config.ReplicaImage.value }}
name: {{ .Volume.owner }}-rep-con
resources:
{{- if ne $setResourceLimits "none" }}
limits:
{{- range $rKey, $rLimit := $resourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
{{- if ne $setResourceRequests "none" }}
requests:
{{- range $rKey, $rReq := $resourceRequestsVal }}
{{ $rKey }}: {{ $rReq }}
{{- end }}
{{- end }}
ports:
- containerPort: 9502
protocol: TCP
- containerPort: 9503
protocol: TCP
- containerPort: 9504
protocol: TCP
volumeMounts:
- name: openebs
mountPath: /openebs
tolerations:
{{- if ne $isEvictionTolerations "none" }}
{{- range $k, $v := $evictionTolerationsVal }}
-
{{- range $kk, $vv := $v }}
{{ $kk }}: {{ $vv }}
{{- end }}
{{- end }}
{{- end }}
{{- if ne $hasReplicaToleration "none" }}
{{- range $k, $v := $replicaTolerationVal }}
-
{{- range $kk, $vv := $v }}
{{ $kk }}: {{ $vv }}
{{- end }}
{{- end }}
{{- end }}
volumes:
- name: openebs
hostPath:
path: {{ .TaskResult.creategetpath.storagePoolPath }}/{{ .Volume.owner }}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-create-output-default
spec:
meta: |
id: createoutput
action: output
kind: CASVolume
apiVersion: v1alpha1
task: |
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
annotations:
openebs.io/storageclass-version: {{ .TaskResult.creategetsc.storageClassVersion }}
spec:
capacity: {{ .Volume.capacity }}
targetPortal: {{ .TaskResult.createputsvc.clusterIP }}:3260
iqn: iqn.2016-09.com.openebs.jiva:{{ .Volume.owner }}
replicas: {{ .Config.ReplicaCount.value }}
targetIP: {{ .TaskResult.readlistsvc.clusterIP }}
targetPort: 3260
casType: jiva
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-delete-listtargetservice-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: deletelistsvc
runNamespace: {{ $jivapodsns }}
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/controller-service=jiva-controller-svc,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistsvc.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistsvc.names | notFoundErr "controller service not found" | saveIf "deletelistsvc.notFoundErr" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-delete-listtargetdeployment-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: deletelistctrl
runNamespace: {{ $jivapodsns }}
apiVersion: apps/v1
kind: Deployment
action: list
options: |-
labelSelector: openebs.io/controller=jiva-controller,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistctrl.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistctrl.names | notFoundErr "controller deployment not found" | saveIf "deletelistctrl.notFoundErr" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-delete-listreplicadeployment-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: deletelistrep
runNamespace: {{ $jivapodsns }}
apiVersion: apps/v1
kind: Deployment
action: list
options: |-
labelSelector: openebs.io/replica=jiva-replica,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistrep.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistrep.names | notFoundErr "replica deployment not found" | saveIf "deletelistrep.notFoundErr" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-delete-deletetargetservice-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: deletedeletesvc
runNamespace: {{ $jivapodsns }}
apiVersion: v1
kind: Service
action: delete
objectName: {{ .TaskResult.deletelistsvc.names }}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-delete-deletetargetdeployment-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: deletedeletectrl
runNamespace: {{ $jivapodsns }}
apiVersion: apps/v1
kind: Deployment
action: delete
objectName: {{ .TaskResult.deletelistctrl.names }}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-delete-listreplicapod-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
id: deletelistreppods
runNamespace: {{ $jivapodsns }}
disable: {{ .Config.RetainReplicaData.enabled }}
apiVersion: v1
kind: Pod
action: list
options: |-
labelSelector: openebs.io/replica=jiva-replica,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- $nodesList := jsonpath .JsonResult "{range .items[*]}pkey=nodes,{@.spec.nodeName}={@.spec.volumes[?(@.name=='openebs')].hostPath.path};{end}" | trim | default "" | splitListTrim ";" -}}
{{- $nodesList | keyMap "nodeJRPathList" .ListItems | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-delete-deletereplicadeployment-default
spec:
meta: |
{{- $jivapodsns := .TaskResult.jivapodsinopenebsns.ns | default .Volume.runNamespace -}}
{{- $replicaDeployList := .TaskResult.deletelistrep.names | split " " -}}
id: deletedeleterep
runNamespace: {{ $jivapodsns }}
apiVersion: apps/v1
kind: Deployment
action: delete
repeatWith:
metas:
{{- range $k, $v := $replicaDeployList }}
- objectName: {{ $v | quote }}
{{- end }}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-delete-putreplicascrub-default
spec:
meta: |
apiVersion: batch/v1
runNamespace: {{ .Config.OpenEBSNamespace.value }}
disable: {{ .Config.RetainReplicaData.enabled }}
kind: Job
action: put
id: jivavolumedelreplicascrub
{{- $nodeNames := keys .ListItems.nodeJRPathList.nodes }}
repeatWith:
resources:
{{- range $k, $v := $nodeNames }}
- {{ $v | quote }}
{{- end }}
task: |
kind: Job
apiVersion: batch/v1
metadata:
      name: sjr-{{ .Volume.owner }}-{{ randAlphaNum 4 | lower }}
labels:
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/cas-type: jiva
spec:
backoffLimit: 4
{{- if kubeVersionGte .CAST.kubeVersion "v1.12.0" }}
ttlSecondsAfterFinished: 0
{{- end }}
template:
spec:
restartPolicy: OnFailure
nodeSelector:
kubernetes.io/hostname: {{ kubeNodeGetHostNameOrNodeName .ListItems.currentRepeatResource }}
volumes:
- name: replica-path
hostPath:
path: {{ pluck .ListItems.currentRepeatResource .ListItems.nodeJRPathList.nodes | first }}
type: ""
containers:
- name: sjr
securityContext:
privileged: true
image: {{ .Config.ScrubImage.value }}
command:
- sh
- -c
- 'rm -rf /mnt/replica/*; sync; date > /mnt/replica/scrubbed.txt; sync;'
volumeMounts:
- mountPath: /mnt/replica
name: replica-path
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | addTo "jivavolumedelreplicascrub.objectName" .TaskResult | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: jiva-volume-delete-output-default
spec:
meta: |
id: deleteoutput
action: output
kind: CASVolume
apiVersion: v1alpha1
task: |
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
---
`
// JivaVolumeArtifacts returns the jiva volume related artifacts corresponding
// to the latest version
func JivaVolumeArtifacts() (list artifactList) {
list.Items = append(list.Items, ParseArtifactListFromMultipleYamls(jivaVolumes{})...)
return
}
type jivaVolumes struct{}
// FetchYamls returns all the yamls related to jiva volume in a string
// format
//
// NOTE:
// This is an implementation of MultiYamlFetcher
func (j jivaVolumes) FetchYamls() string {
return jivaVolumeYamls
}
| 1 | 18,313 | What does this contain? Is it the PVC namespace or the openebs one? | openebs-maya | go
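
On the reviewer's question: the task that saves jivapodsinopenebsns.ns is not in this excerpt, so the following is an interpretation, not an authoritative answer. Judging from the create-side tasks above (which save the namespace via createputsvc.jivapodsns), the value is the openebs namespace when DeployInOpenEBSNamespace is enabled, and the read/delete tasks fall back to the PVC's runNamespace otherwise, along these lines:

{{- $deployInOpenEBSNamespace := .Config.DeployInOpenEBSNamespace.enabled | default "true" | lower -}}
{{- if eq $deployInOpenEBSNamespace "true" }}
runNamespace: {{ .Config.OpenEBSNamespace.value }}  {{/* jiva pods live in the openebs namespace */}}
{{- else }}
runNamespace: {{ .Volume.runNamespace }}  {{/* jiva pods live in the PVC's namespace */}}
{{- end }}
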
@@ -0,0 +1,18 @@
+// Copyright (c) .NET Foundation. All rights reserved.
+// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
+
+namespace Microsoft.AspNetCore.Server.Kestrel.Core.Features
+{
+ /// <summary>
+ /// Represents a minimum data rate for the request body of an HTTP request.
+ /// </summary>
+ public interface IHttpRequestBodyMinimumDataRateFeature
+ {
+ /// <summary>
+ /// The minimum data rate in bytes/second at which the request body should be received.
+ /// Setting this property to null indicates no minimum data rate should be enforced.
+ /// This limit has no effect on upgraded connections which are always unlimited.
+ /// </summary>
+ MinimumDataRate MinimumDataRate { get; set; }
+ }
+} | 1 | 1 | 13,389 | Design note: Using local concrete data types on a feature abstraction makes it hard to generalize / replace. This is tolerable so long as this remains a kestrel exclusive feature, but we'd need a different design if this were ever moved to HttpAbstractions. | aspnet-KestrelHttpServer | .cs |
|
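
To make the design note concrete, here is a hypothetical alternative sketch (illustrative only, not actual Kestrel or HttpAbstractions code): exposing primitives instead of the Kestrel-specific MinimumDataRate class keeps the feature contract implementable by any server.

// Hypothetical sketch -- the member names are assumptions, not a real API.
public interface IHttpRequestBodyMinimumDataRateFeature
{
    // null disables enforcement, mirroring the documented semantics above.
    double? BytesPerSecond { get; set; }

    // How long the server waits before it starts enforcing the rate.
    System.TimeSpan GracePeriod { get; set; }
}
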
@@ -426,11 +426,12 @@ LGBM_SE LGBM_BoosterGetEvalNames_R(LGBM_SE handle,
LGBM_SE buf_len,
LGBM_SE actual_len,
LGBM_SE eval_names,
- LGBM_SE call_state) {
+ LGBM_SE call_state,
+ LGBM_SE data_idx) {
R_API_BEGIN();
int len;
- CHECK_CALL(LGBM_BoosterGetEvalCounts(R_GET_PTR(handle), &len));
+ CHECK_CALL(LGBM_BoosterGetEvalCounts(R_GET_PTR(handle), R_AS_INT(data_idx), &len));
std::vector<std::vector<char>> names(len);
std::vector<char*> ptr_names(len);
for (int i = 0; i < len; ++i) { | 1 | #include <LightGBM/lightgbm_R.h>
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/text_reader.h>
#include <LightGBM/utils/common.h>
#include <vector>
#include <string>
#include <utility>
#include <cstring>
#include <cstdio>
#include <sstream>
#include <cstdint>
#include <memory>
#define COL_MAJOR (0)
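// The three macros below implement the R-facing error convention used by every
// wrapper in this file: call_state arrives as the last argument, R_API_BEGIN and
// R_API_END bracket the body in a try/catch that records failures via
// LGBM_SetLastError, and CHECK_CALL flags call_state and returns early when a
// C-API call fails.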
#define R_API_BEGIN() \
try {
#define R_API_END() } \
catch(std::exception& ex) { R_INT_PTR(call_state)[0] = -1; LGBM_SetLastError(ex.what()); return call_state;} \
catch(std::string& ex) { R_INT_PTR(call_state)[0] = -1; LGBM_SetLastError(ex.c_str()); return call_state; } \
catch(...) { R_INT_PTR(call_state)[0] = -1; LGBM_SetLastError("unknown exception"); return call_state;} \
return call_state;
#define CHECK_CALL(x) \
if ((x) != 0) { \
R_INT_PTR(call_state)[0] = -1;\
return call_state;\
}
using namespace LightGBM;
LGBM_SE EncodeChar(LGBM_SE dest, const char* src, LGBM_SE buf_len, LGBM_SE actual_len, size_t str_len) {
if (str_len > INT32_MAX) {
Log::Fatal("Don't support large string in R-package");
}
R_INT_PTR(actual_len)[0] = static_cast<int>(str_len);
if (R_AS_INT(buf_len) < static_cast<int>(str_len)) { return dest; }
auto ptr = R_CHAR_PTR(dest);
std::memcpy(ptr, src, str_len);
return dest;
}
LGBM_SE LGBM_GetLastError_R(LGBM_SE buf_len, LGBM_SE actual_len, LGBM_SE err_msg) {
return EncodeChar(err_msg, LGBM_GetLastError(), buf_len, actual_len, std::strlen(LGBM_GetLastError()) + 1);
}
LGBM_SE LGBM_DatasetCreateFromFile_R(LGBM_SE filename,
LGBM_SE parameters,
LGBM_SE reference,
LGBM_SE out,
LGBM_SE call_state) {
R_API_BEGIN();
DatasetHandle handle = nullptr;
CHECK_CALL(LGBM_DatasetCreateFromFile(R_CHAR_PTR(filename), R_CHAR_PTR(parameters),
R_GET_PTR(reference), &handle));
R_SET_PTR(out, handle);
R_API_END();
}
LGBM_SE LGBM_DatasetCreateFromCSC_R(LGBM_SE indptr,
LGBM_SE indices,
LGBM_SE data,
LGBM_SE num_indptr,
LGBM_SE nelem,
LGBM_SE num_row,
LGBM_SE parameters,
LGBM_SE reference,
LGBM_SE out,
LGBM_SE call_state) {
R_API_BEGIN();
const int* p_indptr = R_INT_PTR(indptr);
const int* p_indices = R_INT_PTR(indices);
const double* p_data = R_REAL_PTR(data);
int64_t nindptr = static_cast<int64_t>(R_AS_INT(num_indptr));
int64_t ndata = static_cast<int64_t>(R_AS_INT(nelem));
int64_t nrow = static_cast<int64_t>(R_AS_INT(num_row));
DatasetHandle handle = nullptr;
CHECK_CALL(LGBM_DatasetCreateFromCSC(p_indptr, C_API_DTYPE_INT32, p_indices,
p_data, C_API_DTYPE_FLOAT64, nindptr, ndata,
nrow, R_CHAR_PTR(parameters), R_GET_PTR(reference), &handle));
R_SET_PTR(out, handle);
R_API_END();
}
LGBM_SE LGBM_DatasetCreateFromMat_R(LGBM_SE data,
LGBM_SE num_row,
LGBM_SE num_col,
LGBM_SE parameters,
LGBM_SE reference,
LGBM_SE out,
LGBM_SE call_state) {
R_API_BEGIN();
int32_t nrow = static_cast<int32_t>(R_AS_INT(num_row));
int32_t ncol = static_cast<int32_t>(R_AS_INT(num_col));
double* p_mat = R_REAL_PTR(data);
DatasetHandle handle = nullptr;
CHECK_CALL(LGBM_DatasetCreateFromMat(p_mat, C_API_DTYPE_FLOAT64, nrow, ncol, COL_MAJOR,
R_CHAR_PTR(parameters), R_GET_PTR(reference), &handle));
R_SET_PTR(out, handle);
R_API_END();
}
LGBM_SE LGBM_DatasetGetSubset_R(LGBM_SE handle,
LGBM_SE used_row_indices,
LGBM_SE len_used_row_indices,
LGBM_SE parameters,
LGBM_SE out,
LGBM_SE call_state) {
R_API_BEGIN();
int len = R_AS_INT(len_used_row_indices);
std::vector<int> idxvec(len);
// convert from one-based to zero-based index
#pragma omp parallel for schedule(static)
for (int i = 0; i < len; ++i) {
idxvec[i] = R_INT_PTR(used_row_indices)[i] - 1;
}
DatasetHandle res = nullptr;
CHECK_CALL(LGBM_DatasetGetSubset(R_GET_PTR(handle),
idxvec.data(), len, R_CHAR_PTR(parameters),
&res));
R_SET_PTR(out, res);
R_API_END();
}
LGBM_SE LGBM_DatasetSetFeatureNames_R(LGBM_SE handle,
LGBM_SE feature_names,
LGBM_SE call_state) {
R_API_BEGIN();
auto vec_names = Common::Split(R_CHAR_PTR(feature_names), '\t');
std::vector<const char*> vec_sptr;
int len = static_cast<int>(vec_names.size());
for (int i = 0; i < len; ++i) {
vec_sptr.push_back(vec_names[i].c_str());
}
CHECK_CALL(LGBM_DatasetSetFeatureNames(R_GET_PTR(handle),
vec_sptr.data(), len));
R_API_END();
}
LGBM_SE LGBM_DatasetGetFeatureNames_R(LGBM_SE handle,
LGBM_SE buf_len,
LGBM_SE actual_len,
LGBM_SE feature_names,
LGBM_SE call_state) {
R_API_BEGIN();
int len = 0;
CHECK_CALL(LGBM_DatasetGetNumFeature(R_GET_PTR(handle), &len));
std::vector<std::vector<char>> names(len);
std::vector<char*> ptr_names(len);
for (int i = 0; i < len; ++i) {
names[i].resize(256);
ptr_names[i] = names[i].data();
}
int out_len;
CHECK_CALL(LGBM_DatasetGetFeatureNames(R_GET_PTR(handle),
ptr_names.data(), &out_len));
CHECK(len == out_len);
auto merge_str = Common::Join<char*>(ptr_names, "\t");
EncodeChar(feature_names, merge_str.c_str(), buf_len, actual_len, merge_str.size() + 1);
R_API_END();
}
LGBM_SE LGBM_DatasetSaveBinary_R(LGBM_SE handle,
LGBM_SE filename,
LGBM_SE call_state) {
R_API_BEGIN();
CHECK_CALL(LGBM_DatasetSaveBinary(R_GET_PTR(handle),
R_CHAR_PTR(filename)));
R_API_END();
}
LGBM_SE LGBM_DatasetFree_R(LGBM_SE handle,
LGBM_SE call_state) {
R_API_BEGIN();
if (R_GET_PTR(handle) != nullptr) {
CHECK_CALL(LGBM_DatasetFree(R_GET_PTR(handle)));
R_SET_PTR(handle, nullptr);
}
R_API_END();
}
LGBM_SE LGBM_DatasetSetField_R(LGBM_SE handle,
LGBM_SE field_name,
LGBM_SE field_data,
LGBM_SE num_element,
LGBM_SE call_state) {
R_API_BEGIN();
int len = static_cast<int>(R_AS_INT(num_element));
const char* name = R_CHAR_PTR(field_name);
if (!strcmp("group", name) || !strcmp("query", name)) {
std::vector<int32_t> vec(len);
#pragma omp parallel for schedule(static)
for (int i = 0; i < len; ++i) {
vec[i] = static_cast<int32_t>(R_INT_PTR(field_data)[i]);
}
CHECK_CALL(LGBM_DatasetSetField(R_GET_PTR(handle), name, vec.data(), len, C_API_DTYPE_INT32));
  } else if (!strcmp("init_score", name)) {
CHECK_CALL(LGBM_DatasetSetField(R_GET_PTR(handle), name, R_REAL_PTR(field_data), len, C_API_DTYPE_FLOAT64));
} else {
std::vector<float> vec(len);
#pragma omp parallel for schedule(static)
for (int i = 0; i < len; ++i) {
vec[i] = static_cast<float>(R_REAL_PTR(field_data)[i]);
}
CHECK_CALL(LGBM_DatasetSetField(R_GET_PTR(handle), name, vec.data(), len, C_API_DTYPE_FLOAT32));
}
R_API_END();
}
LGBM_SE LGBM_DatasetGetField_R(LGBM_SE handle,
LGBM_SE field_name,
LGBM_SE field_data,
LGBM_SE call_state) {
R_API_BEGIN();
const char* name = R_CHAR_PTR(field_name);
int out_len = 0;
int out_type = 0;
const void* res;
CHECK_CALL(LGBM_DatasetGetField(R_GET_PTR(handle), name, &out_len, &res, &out_type));
if (!strcmp("group", name) || !strcmp("query", name)) {
auto p_data = reinterpret_cast<const int32_t*>(res);
// convert from boundaries to size
#pragma omp parallel for schedule(static)
for (int i = 0; i < out_len - 1; ++i) {
R_INT_PTR(field_data)[i] = p_data[i + 1] - p_data[i];
}
} else if (!strcmp("init_score", name)) {
auto p_data = reinterpret_cast<const double*>(res);
#pragma omp parallel for schedule(static)
for (int i = 0; i < out_len; ++i) {
R_REAL_PTR(field_data)[i] = p_data[i];
}
} else {
auto p_data = reinterpret_cast<const float*>(res);
#pragma omp parallel for schedule(static)
for (int i = 0; i < out_len; ++i) {
R_REAL_PTR(field_data)[i] = p_data[i];
}
}
R_API_END();
}
LGBM_SE LGBM_DatasetGetFieldSize_R(LGBM_SE handle,
LGBM_SE field_name,
LGBM_SE out,
LGBM_SE call_state) {
R_API_BEGIN();
const char* name = R_CHAR_PTR(field_name);
int out_len = 0;
int out_type = 0;
const void* res;
CHECK_CALL(LGBM_DatasetGetField(R_GET_PTR(handle), name, &out_len, &res, &out_type));
if (!strcmp("group", name) || !strcmp("query", name)) {
out_len -= 1;
}
R_INT_PTR(out)[0] = static_cast<int>(out_len);
R_API_END();
}
LGBM_SE LGBM_DatasetGetNumData_R(LGBM_SE handle, LGBM_SE out,
LGBM_SE call_state) {
int nrow;
R_API_BEGIN();
CHECK_CALL(LGBM_DatasetGetNumData(R_GET_PTR(handle), &nrow));
R_INT_PTR(out)[0] = static_cast<int>(nrow);
R_API_END();
}
LGBM_SE LGBM_DatasetGetNumFeature_R(LGBM_SE handle,
LGBM_SE out,
LGBM_SE call_state) {
int nfeature;
R_API_BEGIN();
CHECK_CALL(LGBM_DatasetGetNumFeature(R_GET_PTR(handle), &nfeature));
R_INT_PTR(out)[0] = static_cast<int>(nfeature);
R_API_END();
}
// --- start Booster interfaces
LGBM_SE LGBM_BoosterFree_R(LGBM_SE handle,
LGBM_SE call_state) {
R_API_BEGIN();
if (R_GET_PTR(handle) != nullptr) {
CHECK_CALL(LGBM_BoosterFree(R_GET_PTR(handle)));
R_SET_PTR(handle, nullptr);
}
R_API_END();
}
LGBM_SE LGBM_BoosterCreate_R(LGBM_SE train_data,
LGBM_SE parameters,
LGBM_SE out,
LGBM_SE call_state) {
R_API_BEGIN();
BoosterHandle handle = nullptr;
CHECK_CALL(LGBM_BoosterCreate(R_GET_PTR(train_data), R_CHAR_PTR(parameters), &handle));
R_SET_PTR(out, handle);
R_API_END();
}
LGBM_SE LGBM_BoosterCreateFromModelfile_R(LGBM_SE filename,
LGBM_SE out,
LGBM_SE call_state) {
R_API_BEGIN();
int out_num_iterations = 0;
BoosterHandle handle = nullptr;
CHECK_CALL(LGBM_BoosterCreateFromModelfile(R_CHAR_PTR(filename), &out_num_iterations, &handle));
R_SET_PTR(out, handle);
R_API_END();
}
LGBM_SE LGBM_BoosterLoadModelFromString_R(LGBM_SE model_str,
LGBM_SE out,
LGBM_SE call_state) {
R_API_BEGIN();
int out_num_iterations = 0;
BoosterHandle handle = nullptr;
CHECK_CALL(LGBM_BoosterLoadModelFromString(R_CHAR_PTR(model_str), &out_num_iterations, &handle));
R_SET_PTR(out, handle);
R_API_END();
}
LGBM_SE LGBM_BoosterMerge_R(LGBM_SE handle,
LGBM_SE other_handle,
LGBM_SE call_state) {
R_API_BEGIN();
CHECK_CALL(LGBM_BoosterMerge(R_GET_PTR(handle), R_GET_PTR(other_handle)));
R_API_END();
}
LGBM_SE LGBM_BoosterAddValidData_R(LGBM_SE handle,
LGBM_SE valid_data,
LGBM_SE call_state) {
R_API_BEGIN();
CHECK_CALL(LGBM_BoosterAddValidData(R_GET_PTR(handle), R_GET_PTR(valid_data)));
R_API_END();
}
LGBM_SE LGBM_BoosterResetTrainingData_R(LGBM_SE handle,
LGBM_SE train_data,
LGBM_SE call_state) {
R_API_BEGIN();
CHECK_CALL(LGBM_BoosterResetTrainingData(R_GET_PTR(handle), R_GET_PTR(train_data)));
R_API_END();
}
LGBM_SE LGBM_BoosterResetParameter_R(LGBM_SE handle,
LGBM_SE parameters,
LGBM_SE call_state) {
R_API_BEGIN();
CHECK_CALL(LGBM_BoosterResetParameter(R_GET_PTR(handle), R_CHAR_PTR(parameters)));
R_API_END();
}
LGBM_SE LGBM_BoosterGetNumClasses_R(LGBM_SE handle,
LGBM_SE out,
LGBM_SE call_state) {
int num_class;
R_API_BEGIN();
CHECK_CALL(LGBM_BoosterGetNumClasses(R_GET_PTR(handle), &num_class));
R_INT_PTR(out)[0] = static_cast<int>(num_class);
R_API_END();
}
LGBM_SE LGBM_BoosterUpdateOneIter_R(LGBM_SE handle,
LGBM_SE call_state) {
int is_finished = 0;
R_API_BEGIN();
CHECK_CALL(LGBM_BoosterUpdateOneIter(R_GET_PTR(handle), &is_finished));
R_API_END();
}
LGBM_SE LGBM_BoosterUpdateOneIterCustom_R(LGBM_SE handle,
LGBM_SE grad,
LGBM_SE hess,
LGBM_SE len,
LGBM_SE call_state) {
int is_finished = 0;
R_API_BEGIN();
int int_len = R_AS_INT(len);
std::vector<float> tgrad(int_len), thess(int_len);
#pragma omp parallel for schedule(static)
for (int j = 0; j < int_len; ++j) {
tgrad[j] = static_cast<float>(R_REAL_PTR(grad)[j]);
thess[j] = static_cast<float>(R_REAL_PTR(hess)[j]);
}
CHECK_CALL(LGBM_BoosterUpdateOneIterCustom(R_GET_PTR(handle), tgrad.data(), thess.data(), &is_finished));
R_API_END();
}
LGBM_SE LGBM_BoosterRollbackOneIter_R(LGBM_SE handle,
LGBM_SE call_state) {
R_API_BEGIN();
CHECK_CALL(LGBM_BoosterRollbackOneIter(R_GET_PTR(handle)));
R_API_END();
}
LGBM_SE LGBM_BoosterGetCurrentIteration_R(LGBM_SE handle,
LGBM_SE out,
LGBM_SE call_state) {
int out_iteration;
R_API_BEGIN();
CHECK_CALL(LGBM_BoosterGetCurrentIteration(R_GET_PTR(handle), &out_iteration));
R_INT_PTR(out)[0] = static_cast<int>(out_iteration);
R_API_END();
}
LGBM_SE LGBM_BoosterGetEvalNames_R(LGBM_SE handle,
LGBM_SE buf_len,
LGBM_SE actual_len,
LGBM_SE eval_names,
LGBM_SE call_state) {
R_API_BEGIN();
int len;
CHECK_CALL(LGBM_BoosterGetEvalCounts(R_GET_PTR(handle), &len));
std::vector<std::vector<char>> names(len);
std::vector<char*> ptr_names(len);
for (int i = 0; i < len; ++i) {
names[i].resize(128);
ptr_names[i] = names[i].data();
}
int out_len;
CHECK_CALL(LGBM_BoosterGetEvalNames(R_GET_PTR(handle), &out_len, ptr_names.data()));
CHECK(out_len == len);
auto merge_names = Common::Join<char*>(ptr_names, "\t");
EncodeChar(eval_names, merge_names.c_str(), buf_len, actual_len, merge_names.size() + 1);
R_API_END();
}
LGBM_SE LGBM_BoosterGetEval_R(LGBM_SE handle,
LGBM_SE data_idx,
LGBM_SE out_result,
LGBM_SE call_state) {
R_API_BEGIN();
int len;
CHECK_CALL(LGBM_BoosterGetEvalCounts(R_GET_PTR(handle), &len));
double* ptr_ret = R_REAL_PTR(out_result);
int out_len;
CHECK_CALL(LGBM_BoosterGetEval(R_GET_PTR(handle), R_AS_INT(data_idx), &out_len, ptr_ret));
CHECK(out_len == len);
R_API_END();
}
LGBM_SE LGBM_BoosterGetNumPredict_R(LGBM_SE handle,
LGBM_SE data_idx,
LGBM_SE out,
LGBM_SE call_state) {
R_API_BEGIN();
int64_t len;
CHECK_CALL(LGBM_BoosterGetNumPredict(R_GET_PTR(handle), R_AS_INT(data_idx), &len));
R_INT_PTR(out)[0] = static_cast<int>(len);
R_API_END();
}
LGBM_SE LGBM_BoosterGetPredict_R(LGBM_SE handle,
LGBM_SE data_idx,
LGBM_SE out_result,
LGBM_SE call_state) {
R_API_BEGIN();
double* ptr_ret = R_REAL_PTR(out_result);
int64_t out_len;
CHECK_CALL(LGBM_BoosterGetPredict(R_GET_PTR(handle), R_AS_INT(data_idx), &out_len, ptr_ret));
R_API_END();
}
int GetPredictType(LGBM_SE is_rawscore, LGBM_SE is_leafidx, LGBM_SE is_predcontrib) {
int pred_type = C_API_PREDICT_NORMAL;
if (R_AS_INT(is_rawscore)) {
pred_type = C_API_PREDICT_RAW_SCORE;
}
if (R_AS_INT(is_leafidx)) {
pred_type = C_API_PREDICT_LEAF_INDEX;
}
if (R_AS_INT(is_predcontrib)) {
pred_type = C_API_PREDICT_CONTRIB;
}
return pred_type;
}
LGBM_SE LGBM_BoosterPredictForFile_R(LGBM_SE handle,
LGBM_SE data_filename,
LGBM_SE data_has_header,
LGBM_SE is_rawscore,
LGBM_SE is_leafidx,
LGBM_SE is_predcontrib,
LGBM_SE num_iteration,
LGBM_SE parameter,
LGBM_SE result_filename,
LGBM_SE call_state) {
R_API_BEGIN();
int pred_type = GetPredictType(is_rawscore, is_leafidx, is_predcontrib);
CHECK_CALL(LGBM_BoosterPredictForFile(R_GET_PTR(handle), R_CHAR_PTR(data_filename),
R_AS_INT(data_has_header), pred_type, R_AS_INT(num_iteration), R_CHAR_PTR(parameter),
R_CHAR_PTR(result_filename)));
R_API_END();
}
LGBM_SE LGBM_BoosterCalcNumPredict_R(LGBM_SE handle,
LGBM_SE num_row,
LGBM_SE is_rawscore,
LGBM_SE is_leafidx,
LGBM_SE is_predcontrib,
LGBM_SE num_iteration,
LGBM_SE out_len,
LGBM_SE call_state) {
R_API_BEGIN();
int pred_type = GetPredictType(is_rawscore, is_leafidx, is_predcontrib);
int64_t len = 0;
CHECK_CALL(LGBM_BoosterCalcNumPredict(R_GET_PTR(handle), R_AS_INT(num_row),
pred_type, R_AS_INT(num_iteration), &len));
R_INT_PTR(out_len)[0] = static_cast<int>(len);
R_API_END();
}
LGBM_SE LGBM_BoosterPredictForCSC_R(LGBM_SE handle,
LGBM_SE indptr,
LGBM_SE indices,
LGBM_SE data,
LGBM_SE num_indptr,
LGBM_SE nelem,
LGBM_SE num_row,
LGBM_SE is_rawscore,
LGBM_SE is_leafidx,
LGBM_SE is_predcontrib,
LGBM_SE num_iteration,
LGBM_SE parameter,
LGBM_SE out_result,
LGBM_SE call_state) {
R_API_BEGIN();
int pred_type = GetPredictType(is_rawscore, is_leafidx, is_predcontrib);
const int* p_indptr = R_INT_PTR(indptr);
const int* p_indices = R_INT_PTR(indices);
const double* p_data = R_REAL_PTR(data);
int64_t nindptr = R_AS_INT(num_indptr);
int64_t ndata = R_AS_INT(nelem);
int64_t nrow = R_AS_INT(num_row);
double* ptr_ret = R_REAL_PTR(out_result);
int64_t out_len;
CHECK_CALL(LGBM_BoosterPredictForCSC(R_GET_PTR(handle),
p_indptr, C_API_DTYPE_INT32, p_indices,
p_data, C_API_DTYPE_FLOAT64, nindptr, ndata,
nrow, pred_type, R_AS_INT(num_iteration), R_CHAR_PTR(parameter), &out_len, ptr_ret));
R_API_END();
}
LGBM_SE LGBM_BoosterPredictForMat_R(LGBM_SE handle,
LGBM_SE data,
LGBM_SE num_row,
LGBM_SE num_col,
LGBM_SE is_rawscore,
LGBM_SE is_leafidx,
LGBM_SE is_predcontrib,
LGBM_SE num_iteration,
LGBM_SE parameter,
LGBM_SE out_result,
LGBM_SE call_state) {
R_API_BEGIN();
int pred_type = GetPredictType(is_rawscore, is_leafidx, is_predcontrib);
int32_t nrow = R_AS_INT(num_row);
int32_t ncol = R_AS_INT(num_col);
double* p_mat = R_REAL_PTR(data);
double* ptr_ret = R_REAL_PTR(out_result);
int64_t out_len;
CHECK_CALL(LGBM_BoosterPredictForMat(R_GET_PTR(handle),
p_mat, C_API_DTYPE_FLOAT64, nrow, ncol, COL_MAJOR,
pred_type, R_AS_INT(num_iteration), R_CHAR_PTR(parameter), &out_len, ptr_ret));
R_API_END();
}
LGBM_SE LGBM_BoosterSaveModel_R(LGBM_SE handle,
LGBM_SE num_iteration,
LGBM_SE filename,
LGBM_SE call_state) {
R_API_BEGIN();
CHECK_CALL(LGBM_BoosterSaveModel(R_GET_PTR(handle), 0, R_AS_INT(num_iteration), R_CHAR_PTR(filename)));
R_API_END();
}
LGBM_SE LGBM_BoosterSaveModelToString_R(LGBM_SE handle,
LGBM_SE num_iteration,
LGBM_SE buffer_len,
LGBM_SE actual_len,
LGBM_SE out_str,
LGBM_SE call_state) {
R_API_BEGIN();
int64_t out_len = 0;
std::vector<char> inner_char_buf(R_AS_INT(buffer_len));
CHECK_CALL(LGBM_BoosterSaveModelToString(R_GET_PTR(handle), 0, R_AS_INT(num_iteration), R_AS_INT(buffer_len), &out_len, inner_char_buf.data()));
EncodeChar(out_str, inner_char_buf.data(), buffer_len, actual_len, static_cast<size_t>(out_len));
R_API_END();
}
LGBM_SE LGBM_BoosterDumpModel_R(LGBM_SE handle,
LGBM_SE num_iteration,
LGBM_SE buffer_len,
LGBM_SE actual_len,
LGBM_SE out_str,
LGBM_SE call_state) {
R_API_BEGIN();
int64_t out_len = 0;
std::vector<char> inner_char_buf(R_AS_INT(buffer_len));
CHECK_CALL(LGBM_BoosterDumpModel(R_GET_PTR(handle), 0, R_AS_INT(num_iteration), R_AS_INT(buffer_len), &out_len, inner_char_buf.data()));
EncodeChar(out_str, inner_char_buf.data(), buffer_len, actual_len, static_cast<size_t>(out_len));
R_API_END();
}
| 1 | 19,549 | This will break the R package; you should update the R files accordingly. BTW, call_state should remain the last argument. | microsoft-LightGBM | cpp
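
A minimal sketch of the signature ordering the review asks for (hypothetical -- the real fix would also update the corresponding R files): keep call_state as the final parameter and slot data_idx in front of it.

// Hypothetical reordering sketch; the body is unchanged apart from the new argument.
LGBM_SE LGBM_BoosterGetEvalNames_R(LGBM_SE handle,
                                   LGBM_SE data_idx,
                                   LGBM_SE buf_len,
                                   LGBM_SE actual_len,
                                   LGBM_SE eval_names,
                                   LGBM_SE call_state) {
  R_API_BEGIN();
  int len;
  // data_idx precedes call_state, so the R_API_BEGIN/R_API_END convention holds.
  CHECK_CALL(LGBM_BoosterGetEvalCounts(R_GET_PTR(handle), R_AS_INT(data_idx), &len));
  // ... rest of the body as in the original function ...
  R_API_END();
}
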
@@ -182,9 +182,13 @@ final class MediaController
public function getMediumFormatsAction($id): FOSRestView
{
$media = $this->getMedium($id);
+ $mediaContext = $media->getContext();
$formats = [MediaProviderInterface::FORMAT_REFERENCE];
- $formats = array_merge($formats, array_keys($this->mediaPool->getFormatNamesByContext($media->getContext())));
+
+ if (null !== $mediaContext) {
+ $formats = array_merge($formats, array_keys($this->mediaPool->getFormatNamesByContext($mediaContext)));
+ }
$provider = $this->mediaPool->getProvider($media->getProviderName());
| 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Controller\Api;
use FOS\RestBundle\Context\Context;
use FOS\RestBundle\Controller\Annotations as Rest;
use FOS\RestBundle\Request\ParamFetcherInterface;
use FOS\RestBundle\View\View as FOSRestView;
use Nelmio\ApiDocBundle\Annotation\Model;
use Nelmio\ApiDocBundle\Annotation\Operation;
use Sonata\DatagridBundle\Pager\PagerInterface;
use Sonata\MediaBundle\Form\Type\ApiMediaType;
use Sonata\MediaBundle\Model\MediaInterface;
use Sonata\MediaBundle\Model\MediaManagerInterface;
use Sonata\MediaBundle\Provider\MediaProviderInterface;
use Sonata\MediaBundle\Provider\Pool;
use Swagger\Annotations as SWG;
use Symfony\Component\Form\FormFactoryInterface;
use Symfony\Component\Form\FormInterface;
use Symfony\Component\HttpFoundation\BinaryFileResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
use Symfony\Component\Security\Core\Exception\AccessDeniedException;
/**
* Note: Media is plural, medium is singular (at least according to FOSRestBundle route generator).
*
* @author Hugo Briand <[email protected]>
*/
final class MediaController
{
/**
* @var MediaManagerInterface
*/
private $mediaManager;
/**
* @var Pool
*/
private $mediaPool;
/**
* @var FormFactoryInterface
*/
private $formFactory;
public function __construct(MediaManagerInterface $mediaManager, Pool $mediaPool, FormFactoryInterface $formFactory)
{
$this->mediaManager = $mediaManager;
$this->mediaPool = $mediaPool;
$this->formFactory = $formFactory;
}
/**
* Retrieves a specific medium.
*
* @Operation(
* tags={"/api/media/media"},
* summary="Retrieves a specific medium.",
* @SWG\Response(
* response="200",
* description="Returned when successful",
* @SWG\Schema(ref=@Model(type="Sonata\MediaBundle\Model\Media"))
* ),
* @SWG\Response(
* response="404",
* description="Returned when medium is not found"
* )
* )
*
* @Rest\View(serializerGroups={"sonata_api_read"}, serializerEnableMaxDepthChecks=true)
*
* @param int|string $id Medium identifier
*/
public function getMediumAction($id): MediaInterface
{
return $this->getMedium($id);
}
/**
* Retrieves the list of media (paginated).
*
* @Operation(
* tags={"/api/media/media"},
* summary="Retrieves the list of media (paginated).",
* @SWG\Parameter(
* name="page",
* in="query",
* description="Page for media list pagination",
* required=false,
* type="string"
* ),
* @SWG\Parameter(
* name="count",
* in="query",
* description="Number of medias per page",
* required=false,
* type="string"
* ),
* @SWG\Parameter(
* name="enabled",
* in="query",
* description="Enables or disables the medias filter",
* required=false,
* type="string"
* ),
* @SWG\Parameter(
* name="orderBy",
* in="query",
* description="Order by array (key is field, value is direction)",
* required=false,
* type="string"
* ),
* @SWG\Response(
* response="200",
* description="Returned when successful",
* @SWG\Schema(ref=@Model(type="Sonata\DatagridBundle\Pager\PagerInterface"))
* )
* )
*
* @Rest\QueryParam(name="page", requirements="\d+", default="1", description="Page for media list pagination")
* @Rest\QueryParam(name="count", requirements="\d+", default="10", description="Number of medias per page")
* @Rest\QueryParam(name="enabled", requirements="0|1", nullable=true, strict=true, description="Enables or disables the medias filter")
* @Rest\QueryParam(name="orderBy", map=true, requirements="ASC|DESC", nullable=true, strict=true, description="Order by array (key is field, value is direction)")
*
* @Rest\View(serializerGroups={"sonata_api_read"}, serializerEnableMaxDepthChecks=true)
*/
public function getMediaAction(ParamFetcherInterface $paramFetcher): PagerInterface
{
$supportedCriteria = [
'enabled' => '',
];
$page = $paramFetcher->get('page');
$limit = $paramFetcher->get('count');
$sort = $paramFetcher->get('orderBy');
$criteria = array_intersect_key($paramFetcher->all(), $supportedCriteria);
$criteria = array_filter($criteria, static function ($value): bool {
return null !== $value;
});
if (!$sort) {
$sort = [];
} elseif (!\is_array($sort)) {
$sort = [$sort => 'asc'];
}
return $this->mediaManager->getPager($criteria, (int) $page, (int) $limit, $sort);
}
/**
* Returns medium urls for each format.
*
* @Operation(
* tags={"/api/media/media"},
* summary="Returns medium urls for each format.",
* @SWG\Response(
* response="200",
* description="Returned when successful"
* ),
* @SWG\Response(
* response="404",
* description="Returned when medium is not found"
* )
* )
*
* @param int|string $id Medium identifier
*/
public function getMediumFormatsAction($id): FOSRestView
{
$media = $this->getMedium($id);
$formats = [MediaProviderInterface::FORMAT_REFERENCE];
$formats = array_merge($formats, array_keys($this->mediaPool->getFormatNamesByContext($media->getContext())));
$provider = $this->mediaPool->getProvider($media->getProviderName());
$properties = [];
foreach ($formats as $format) {
$properties[$format] = [
'url' => $provider->generatePublicUrl($media, $format),
'properties' => $provider->getHelperProperties($media, $format),
];
}
return FOSRestView::create($properties);
}
/**
* Returns medium binary content for each format.
*
* @Operation(
* tags={"/api/media/media"},
* summary="Returns medium binary content for each format.",
* @SWG\Response(
* response="200",
* description="Returned when successful"
* ),
* @SWG\Response(
* response="404",
* description="Returned when medium is not found"
* )
* )
*
* @param int|string $id Medium identifier
*/
public function getMediumBinaryAction($id, string $format, Request $request): Response
{
$media = $this->getMedium($id);
$response = $this->mediaPool->getProvider($media->getProviderName())->getDownloadResponse($media, $format, $this->mediaPool->getDownloadMode($media));
if ($response instanceof BinaryFileResponse) {
$response->prepare($request);
}
return $response;
}
/**
* Deletes a medium.
*
* @Operation(
* tags={"/api/media/media"},
* summary="Retrieves a specific medium.",
* @SWG\Response(
* response="200",
* description="Returned when medium is successfully deleted"
* ),
* @SWG\Response(
* response="400",
* description="Returned when an error has occurred while deleting the medium"
* ),
* @SWG\Response(
* response="404",
* description="Returned when unable to find medium"
* )
* )
*
* @param int|string $id Medium identifier
*
* @throws NotFoundHttpException
*/
public function deleteMediumAction($id): FOSRestView
{
$medium = $this->getMedium($id);
$this->mediaManager->delete($medium);
return FOSRestView::create(['deleted' => true]);
}
/**
* Updates a medium.
*
* If you need to upload a file (depends on the provider) you will need to do so by sending content as a multipart/form-data HTTP Request
* See documentation for more details.
*
* @Operation(
* tags={"/api/media/media"},
* summary="Retrieves the list of media (paginated).",
* @SWG\Response(
* response="200",
* description="Returned when successful",
* @SWG\Schema(ref=@Model(type="Sonata\MediaBundle\Model\Media"))
* ),
* @SWG\Response(
* response="400",
* description="Returned when an error has occurred while medium update"
* ),
* @SWG\Response(
* response="404",
* description="Returned when unable to find medium"
* )
* )
*
* @param int|string $id Medium identifier
*
* @throws NotFoundHttpException
*
* @return FOSRestView|FormInterface
*/
public function putMediumAction($id, Request $request)
{
$medium = $this->getMedium($id);
try {
$provider = $this->mediaPool->getProvider($medium->getProviderName());
} catch (\RuntimeException | \InvalidArgumentException $ex) {
throw new NotFoundHttpException($ex->getMessage(), $ex);
}
return $this->handleWriteMedium($request, $medium, $provider);
}
/**
* Adds a medium of given provider.
*
* If you need to upload a file (depends on the provider) you will need to do so by sending content as a multipart/form-data HTTP Request
* See documentation for more details.
*
* @Operation(
* tags={"/api/media/media"},
* summary="Returns medium urls for each format.",
* @SWG\Response(
* response="200",
* description="Returned when successful",
* @SWG\Schema(ref=@Model(type="Sonata\MediaBundle\Model\Media"))
* ),
* @SWG\Response(
* response="400",
* description="Returned when an error has occurred while medium creation"
* ),
* @SWG\Response(
* response="404",
* description="Returned when unable to find medium"
* )
* )
*
* @throws NotFoundHttpException
*
* @return FOSRestView|FormInterface
*/
public function postProviderMediumAction(string $provider, Request $request)
{
$medium = $this->mediaManager->create();
$medium->setProviderName($provider);
try {
$mediaProvider = $this->mediaPool->getProvider($provider);
} catch (\RuntimeException | \InvalidArgumentException $ex) {
throw new NotFoundHttpException($ex->getMessage(), $ex);
}
return $this->handleWriteMedium($request, $medium, $mediaProvider);
}
/**
* Set Binary content for a media.
*
* @Operation(
* tags={"/api/media/media"},
* summary="Returns medium binary content for each format.",
* @SWG\Response(
* response="200",
* description="Returned when successful",
* @SWG\Schema(ref=@Model(type="Sonata\MediaBundle\Model\Media"))
* ),
* @SWG\Response(
* response="404",
* description="Returned when media is not found"
* )
* )
*
* @Rest\View(serializerGroups={"sonata_api_read"}, serializerEnableMaxDepthChecks=true)
*
* @param int|string $id Medium identifier
*
* @throws NotFoundHttpException
*/
public function putMediumBinaryContentAction($id, Request $request): MediaInterface
{
$media = $this->getMedium($id);
$media->setBinaryContent($request);
$this->mediaManager->save($media);
return $media;
}
/**
* Retrieves media with identifier $id or throws an exception if not found.
*
* @param int|string $id Media identifier
*
* @throws AccessDeniedException
* @throws NotFoundHttpException
*/
private function getMedium($id = null): MediaInterface
{
$media = $this->mediaManager->find($id);
if (null === $media) {
throw new NotFoundHttpException(sprintf('Media not found for identifier %s.', var_export($id, true)));
}
return $media;
}
/**
 * Writes a medium; used by both the POST and PUT action methods.
*
* @return FOSRestView|FormInterface
*/
private function handleWriteMedium(Request $request, MediaInterface $media, MediaProviderInterface $provider)
{
$form = $this->formFactory->createNamed('', ApiMediaType::class, $media, [
'provider_name' => $provider->getName(),
'csrf_protection' => false,
]);
$form->handleRequest($request);
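        // Note: on newer Symfony versions this check is typically guarded with
        // $form->isSubmitted() as well before calling isValid().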
if ($form->isValid()) {
$media = $form->getData();
$this->mediaManager->save($media);
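            // Serialize the saved medium with the same read group and depth limits as the GET endpoints.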
$context = new Context();
$context->setGroups(['sonata_api_read']);
$context->enableMaxDepth();
$view = FOSRestView::create($media);
$view->setContext($context);
return $view;
}
return $form;
}
}
| 1 | 12,155 | Shouldn't we throw an exception if the context is null instead ? | sonata-project-SonataMediaBundle | php |
@@ -13,6 +13,9 @@ var DdevVersion = "v0.3.0-dev" // Note that this is overridden by make
// for examples defining version constraints.
var DockerVersionConstraint = ">= 17.05.0-ce"
+// DockerComposeVersionConstraint is the current minimum version of docker-compose required for ddev.
+var DockerComposeVersionConstraint = ">= 1.10.0"
+
// WebImg defines the default web image used for applications.
var WebImg = "drud/nginx-php-fpm-local" // Note that this is overridden by make
| 1 | package version
// VERSION is supplied with the git committish this is built from
var VERSION = ""
// IMPORTANT: These versions are overridden by version ldflags specifications VERSION_VARIABLES in the Makefile
// DdevVersion is the current version of ddev, by default the git committish (should be current git tag)
var DdevVersion = "v0.3.0-dev" // Note that this is overridden by make
// DockerVersionConstraint is the current minimum version of docker required for ddev.
// See https://godoc.org/github.com/Masterminds/semver#hdr-Checking_Version_Constraints
// for examples defining version constraints.
var DockerVersionConstraint = ">= 17.05.0-ce"
// WebImg defines the default web image used for applications.
var WebImg = "drud/nginx-php-fpm-local" // Note that this is overridden by make
// WebTag defines the default web image tag for drud dev
var WebTag = "v0.8.0" // Note that this is overridden by make
// DBImg defines the default db image used for applications.
var DBImg = "drud/mysql-local-57" // Note that this is overridden by make
// DBTag defines the default db image tag for drud dev
var DBTag = "v0.6.3" // Note that this is overridden by make
// DBAImg defines the default phpmyadmin image tag used for applications.
var DBAImg = "drud/phpmyadmin"
// DBATag defines the default phpmyadmin image tag used for applications.
var DBATag = "v0.2.0"
// RouterImage defines the image used for the router.
var RouterImage = "drud/ddev-router" // Note that this is overridden by make
// RouterTag defines the tag used for the router.
var RouterTag = "v0.4.3" // Note that this is overridden by make
// COMMIT is the actual committish, supplied by make
var COMMIT = "COMMIT should be overridden"
// BUILDINFO is information with date and context, supplied by make
var BUILDINFO = "BUILDINFO should have new info"
| 1 | 11,996 | These should both be const, not var right? | drud-ddev | php |
@@ -0,0 +1,8 @@
+from mmcv.utils import Registry, build_from_cfg
+
+MATCH_COST = Registry('Match Cost')
+
+
+def build_match_cost(cfg, default_args=None):
+    """Builder of matching cost."""
+ return build_from_cfg(cfg, MATCH_COST, default_args) | 1 | 1 | 21,971 | Rename the directory, match_costers -> match_costs | open-mmlab-mmdetection | py |
|
@@ -64,6 +64,13 @@ class DeleteModelTest(BaseTest):
records = result['data']
self.assertEqual(len(records), 1)
+ def test_delete_collection_gives_number_of_deletable_records_in_headers(self):
+ self.resource.request.GET = {'_limit': '1'}
+ self.resource.collection_delete()
+ headers = self.last_response.headers
+ count = headers['Total-Records']
+ self.assertEquals(int(count), 2)
+
class IsolatedModelsTest(BaseTest):
def setUp(self): | 1 | from pyramid import httpexceptions
from . import BaseTest
class ModelTest(BaseTest):
def setUp(self):
super(ModelTest, self).setUp()
self.record = self.model.create_record({'field': 'value'})
def test_list_gives_number_of_results_in_headers(self):
self.resource.collection_get()
headers = self.last_response.headers
count = headers['Total-Records']
self.assertEquals(int(count), 1)
def test_list_returns_all_records_in_data(self):
result = self.resource.collection_get()
records = result['data']
self.assertEqual(len(records), 1)
self.assertDictEqual(records[0], self.record)
class CreateTest(BaseTest):
def setUp(self):
super(CreateTest, self).setUp()
self.resource.request.validated = {'body': {'data': {'field': 'new'}}}
def test_new_records_are_linked_to_owner(self):
resp = self.resource.collection_post()['data']
record_id = resp['id']
self.model.get_record(record_id) # not raising
def test_create_record_returns_at_least_id_and_last_modified(self):
record = self.resource.collection_post()['data']
self.assertIn(self.resource.model.id_field, record)
self.assertIn(self.resource.model.modified_field, record)
self.assertIn('field', record)
class DeleteModelTest(BaseTest):
def setUp(self):
super(DeleteModelTest, self).setUp()
self.patch_known_field.start()
self.model.create_record({'field': 'a'})
self.model.create_record({'field': 'b'})
def test_delete_on_list_removes_all_records(self):
self.resource.collection_delete()
result = self.resource.collection_get()
records = result['data']
self.assertEqual(len(records), 0)
def test_delete_returns_deleted_version_of_records(self):
result = self.resource.collection_delete()
deleted = result['data'][0]
self.assertIn('deleted', deleted)
def test_delete_supports_collection_filters(self):
self.resource.request.GET = {'field': 'a'}
self.resource.collection_delete()
self.resource.request.GET = {}
result = self.resource.collection_get()
records = result['data']
self.assertEqual(len(records), 1)
class IsolatedModelsTest(BaseTest):
def setUp(self):
super(IsolatedModelsTest, self).setUp()
self.stored = self.model.create_record({}, parent_id='bob')
self.resource.record_id = self.stored['id']
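        # The stored record belongs to parent 'bob', while the requests below run as
        # 'basicauth:alice', so the resource must not expose or modify it.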
def get_request(self):
request = super(IsolatedModelsTest, self).get_request()
request.prefixed_userid = 'basicauth:alice'
return request
def get_context(self):
context = super(IsolatedModelsTest, self).get_context()
context.prefixed_userid = 'basicauth:alice'
return context
def test_list_is_filtered_by_user(self):
resp = self.resource.collection_get()
records = resp['data']
self.assertEquals(len(records), 0)
def test_update_record_of_another_user_will_create_it(self):
self.resource.request.validated = {'body': {'data': {'some': 'record'}}}
self.resource.put()
self.model.get_record(record_id=self.stored['id'],
parent_id='basicauth:alice') # not raising
def test_cannot_modify_record_of_other_user(self):
self.assertRaises(httpexceptions.HTTPNotFound, self.resource.patch)
def test_cannot_delete_record_of_other_user(self):
self.assertRaises(httpexceptions.HTTPNotFound, self.resource.delete)
| 1 | 10,377 | I don't think this should go in the model tests, since it is done in the resource. `PaginatedDeleteTest` seems more appropriate | Kinto-kinto | py |
@@ -273,7 +273,7 @@ public class HttpCommandExecutor implements CommandExecutor, NeedsLocalLogs {
}
if (!GET_ALL_SESSIONS.equals(command.getName())
&& !NEW_SESSION.equals(command.getName())) {
- throw new SessionNotFoundException("Session ID is null");
+ throw new SessionNotFoundException("Session ID is null. Using WebDriver after calling quit()?");
}
}
| 1 | /*
Copyright 2007-2011 Selenium committers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.selenium.remote;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.NoHttpResponseException;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.params.HttpClientParams;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.CoreConnectionPNames;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
import org.openqa.selenium.UnsupportedCommandException;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.logging.LocalLogs;
import org.openqa.selenium.logging.LogEntry;
import org.openqa.selenium.logging.LogType;
import org.openqa.selenium.logging.NeedsLocalLogs;
import org.openqa.selenium.logging.profiler.HttpProfilerLogEntry;
import org.openqa.selenium.remote.internal.HttpClientFactory;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.BindException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.Map;
import static org.apache.http.protocol.ExecutionContext.HTTP_TARGET_HOST;
import static org.openqa.selenium.remote.DriverCommand.*;
public class HttpCommandExecutor implements CommandExecutor, NeedsLocalLogs {
private static final int MAX_REDIRECTS = 10;
private final HttpHost targetHost;
private final URL remoteServer;
private final Map<String, CommandInfo> nameToUrl;
private final HttpClient client;
private final ErrorCodes errorCodes = new ErrorCodes();
private static HttpClientFactory httpClientFactory;
private LocalLogs logs = LocalLogs.getNullLogger();
public HttpCommandExecutor(URL addressOfRemoteServer) {
this(ImmutableMap.<String, CommandInfo>of(), addressOfRemoteServer);
}
public HttpCommandExecutor(Map<String, CommandInfo> additionalCommands, URL addressOfRemoteServer) {
try {
remoteServer = addressOfRemoteServer == null ?
new URL(System.getProperty("webdriver.remote.server", "http://localhost:4444/wd/hub")) :
addressOfRemoteServer;
} catch (MalformedURLException e) {
throw new WebDriverException(e);
}
HttpParams params = new BasicHttpParams();
// Use the JRE default for the socket linger timeout.
params.setParameter(CoreConnectionPNames.SO_LINGER, -1);
HttpClientParams.setRedirecting(params, false);
synchronized (HttpCommandExecutor.class) {
if (httpClientFactory == null) {
httpClientFactory = new HttpClientFactory();
}
}
client = httpClientFactory.getHttpClient();
if (addressOfRemoteServer != null && addressOfRemoteServer.getUserInfo() != null) {
// Use HTTP Basic auth
UsernamePasswordCredentials credentials = new
UsernamePasswordCredentials(addressOfRemoteServer.getUserInfo());
((DefaultHttpClient) client).getCredentialsProvider().
setCredentials(AuthScope.ANY, credentials);
}
// Some machines claim "localhost.localdomain" is the same as "localhost".
// This assumption is not always true.
String host = remoteServer.getHost().replace(".localdomain", "");
targetHost = new HttpHost(
host, remoteServer.getPort(), remoteServer.getProtocol());
ImmutableMap.Builder<String, CommandInfo> builder = ImmutableMap.builder();
for (Map.Entry<String, CommandInfo> entry : additionalCommands.entrySet()) {
builder.put(entry.getKey(), entry.getValue());
}
builder
.put(GET_ALL_SESSIONS, get("/sessions"))
.put(NEW_SESSION, post("/session"))
.put(GET_CAPABILITIES, get("/session/:sessionId"))
.put(QUIT, delete("/session/:sessionId"))
.put(GET_CURRENT_WINDOW_HANDLE, get("/session/:sessionId/window_handle"))
.put(GET_WINDOW_HANDLES, get("/session/:sessionId/window_handles"))
.put(GET, post("/session/:sessionId/url"))
// The Alert API is still experimental and should not be used.
.put(GET_ALERT, get("/session/:sessionId/alert"))
.put(DISMISS_ALERT, post("/session/:sessionId/dismiss_alert"))
.put(ACCEPT_ALERT, post("/session/:sessionId/accept_alert"))
.put(GET_ALERT_TEXT, get("/session/:sessionId/alert_text"))
.put(SET_ALERT_VALUE, post("/session/:sessionId/alert_text"))
.put(GO_FORWARD, post("/session/:sessionId/forward"))
.put(GO_BACK, post("/session/:sessionId/back"))
.put(REFRESH, post("/session/:sessionId/refresh"))
.put(EXECUTE_SCRIPT, post("/session/:sessionId/execute"))
.put(EXECUTE_ASYNC_SCRIPT, post("/session/:sessionId/execute_async"))
.put(GET_CURRENT_URL, get("/session/:sessionId/url"))
.put(GET_TITLE, get("/session/:sessionId/title"))
.put(GET_PAGE_SOURCE, get("/session/:sessionId/source"))
.put(SCREENSHOT, get("/session/:sessionId/screenshot"))
.put(SET_BROWSER_VISIBLE, post("/session/:sessionId/visible"))
.put(IS_BROWSER_VISIBLE, get("/session/:sessionId/visible"))
.put(FIND_ELEMENT, post("/session/:sessionId/element"))
.put(FIND_ELEMENTS, post("/session/:sessionId/elements"))
.put(GET_ACTIVE_ELEMENT, post("/session/:sessionId/element/active"))
.put(FIND_CHILD_ELEMENT, post("/session/:sessionId/element/:id/element"))
.put(FIND_CHILD_ELEMENTS, post("/session/:sessionId/element/:id/elements"))
.put(CLICK_ELEMENT, post("/session/:sessionId/element/:id/click"))
.put(CLEAR_ELEMENT, post("/session/:sessionId/element/:id/clear"))
.put(SUBMIT_ELEMENT, post("/session/:sessionId/element/:id/submit"))
.put(GET_ELEMENT_TEXT, get("/session/:sessionId/element/:id/text"))
.put(SEND_KEYS_TO_ELEMENT, post("/session/:sessionId/element/:id/value"))
.put(UPLOAD_FILE, post("/session/:sessionId/file"))
.put(GET_ELEMENT_VALUE, get("/session/:sessionId/element/:id/value"))
.put(GET_ELEMENT_TAG_NAME, get("/session/:sessionId/element/:id/name"))
.put(IS_ELEMENT_SELECTED, get("/session/:sessionId/element/:id/selected"))
.put(IS_ELEMENT_ENABLED, get("/session/:sessionId/element/:id/enabled"))
.put(IS_ELEMENT_DISPLAYED, get("/session/:sessionId/element/:id/displayed"))
.put(HOVER_OVER_ELEMENT, post("/session/:sessionId/element/:id/hover"))
.put(GET_ELEMENT_LOCATION, get("/session/:sessionId/element/:id/location"))
.put(GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW,
get("/session/:sessionId/element/:id/location_in_view"))
.put(GET_ELEMENT_SIZE, get("/session/:sessionId/element/:id/size"))
.put(GET_ELEMENT_ATTRIBUTE, get("/session/:sessionId/element/:id/attribute/:name"))
.put(ELEMENT_EQUALS, get("/session/:sessionId/element/:id/equals/:other"))
.put(GET_ALL_COOKIES, get("/session/:sessionId/cookie"))
.put(ADD_COOKIE, post("/session/:sessionId/cookie"))
.put(DELETE_ALL_COOKIES, delete("/session/:sessionId/cookie"))
.put(DELETE_COOKIE, delete("/session/:sessionId/cookie/:name"))
.put(SWITCH_TO_FRAME, post("/session/:sessionId/frame"))
.put(SWITCH_TO_WINDOW, post("/session/:sessionId/window"))
.put(GET_WINDOW_SIZE, get("/session/:sessionId/window/:windowHandle/size"))
.put(GET_WINDOW_POSITION, get("/session/:sessionId/window/:windowHandle/position"))
.put(SET_WINDOW_SIZE, post("/session/:sessionId/window/:windowHandle/size"))
.put(SET_WINDOW_POSITION, post("/session/:sessionId/window/:windowHandle/position"))
.put(MAXIMIZE_WINDOW, post("/session/:sessionId/window/:windowHandle/maximize"))
.put(CLOSE, delete("/session/:sessionId/window"))
.put(DRAG_ELEMENT, post("/session/:sessionId/element/:id/drag"))
.put(GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
get("/session/:sessionId/element/:id/css/:propertyName"))
.put(IMPLICITLY_WAIT, post("/session/:sessionId/timeouts/implicit_wait"))
.put(SET_SCRIPT_TIMEOUT, post("/session/:sessionId/timeouts/async_script"))
.put(SET_TIMEOUT, post("/session/:sessionId/timeouts"))
.put(EXECUTE_SQL, post("/session/:sessionId/execute_sql"))
.put(GET_LOCATION, get("/session/:sessionId/location"))
.put(SET_LOCATION, post("/session/:sessionId/location"))
.put(GET_APP_CACHE_STATUS, get("/session/:sessionId/application_cache/status"))
.put(IS_BROWSER_ONLINE, get("/session/:sessionId/browser_connection"))
.put(SET_BROWSER_ONLINE, post("/session/:sessionId/browser_connection"))
// TODO (user): Would it be better to combine this command with
// GET_LOCAL_STORAGE_SIZE?
.put(GET_LOCAL_STORAGE_ITEM, get("/session/:sessionId/local_storage/key/:key"))
.put(REMOVE_LOCAL_STORAGE_ITEM, delete("/session/:sessionId/local_storage/key/:key"))
.put(GET_LOCAL_STORAGE_KEYS, get("/session/:sessionId/local_storage"))
.put(SET_LOCAL_STORAGE_ITEM, post("/session/:sessionId/local_storage"))
.put(CLEAR_LOCAL_STORAGE, delete("/session/:sessionId/local_storage"))
.put(GET_LOCAL_STORAGE_SIZE, get("/session/:sessionId/local_storage/size"))
// TODO (user): Would it be better to combine this command with
// GET_SESSION_STORAGE_SIZE?
.put(GET_SESSION_STORAGE_ITEM, get("/session/:sessionId/session_storage/key/:key"))
.put(REMOVE_SESSION_STORAGE_ITEM, delete("/session/:sessionId/session_storage/key/:key"))
.put(GET_SESSION_STORAGE_KEYS, get("/session/:sessionId/session_storage"))
.put(SET_SESSION_STORAGE_ITEM, post("/session/:sessionId/session_storage"))
.put(CLEAR_SESSION_STORAGE, delete("/session/:sessionId/session_storage"))
.put(GET_SESSION_STORAGE_SIZE, get("/session/:sessionId/session_storage/size"))
.put(GET_SCREEN_ORIENTATION, get("/session/:sessionId/orientation"))
.put(SET_SCREEN_ORIENTATION, post("/session/:sessionId/orientation"))
// Interactions-related commands.
.put(CLICK, post("/session/:sessionId/click"))
.put(DOUBLE_CLICK, post("/session/:sessionId/doubleclick"))
.put(MOUSE_DOWN, post("/session/:sessionId/buttondown"))
.put(MOUSE_UP, post("/session/:sessionId/buttonup"))
.put(MOVE_TO, post("/session/:sessionId/moveto"))
.put(SEND_KEYS_TO_ACTIVE_ELEMENT, post("/session/:sessionId/keys"))
// IME related commands.
.put(IME_GET_AVAILABLE_ENGINES, get("/session/:sessionId/ime/available_engines"))
.put(IME_GET_ACTIVE_ENGINE, get("/session/:sessionId/ime/active_engine"))
.put(IME_IS_ACTIVATED, get("/session/:sessionId/ime/activated"))
.put(IME_DEACTIVATE, post("/session/:sessionId/ime/deactivate"))
.put(IME_ACTIVATE_ENGINE, post("/session/:sessionId/ime/activate"))
// Advanced Touch API commands
// TODO(berrada): Refactor single tap with mouse click.
.put(TOUCH_SINGLE_TAP, post("/session/:sessionId/touch/click"))
.put(TOUCH_DOWN, post("/session/:sessionId/touch/down"))
.put(TOUCH_UP, post("/session/:sessionId/touch/up"))
.put(TOUCH_MOVE, post("/session/:sessionId/touch/move"))
.put(TOUCH_SCROLL, post("/session/:sessionId/touch/scroll"))
.put(TOUCH_DOUBLE_TAP, post("/session/:sessionId/touch/doubleclick"))
.put(TOUCH_LONG_PRESS, post("/session/:sessionId/touch/longclick"))
.put(TOUCH_FLICK, post("/session/:sessionId/touch/flick"))
.put(GET_LOG, post("/session/:sessionId/log"))
.put(GET_AVAILABLE_LOG_TYPES, get("/session/:sessionId/log/types"))
.put(STATUS, get("/status"));
nameToUrl = builder.build();
}
public void setLocalLogs(LocalLogs logs) {
this.logs = logs;
}
private void log(String logType, LogEntry entry) {
logs.addEntry(logType, entry);
}
public URL getAddressOfRemoteServer() {
return remoteServer;
}
public Response execute(Command command) throws IOException {
HttpContext context = new BasicHttpContext();
if (command.getSessionId() == null) {
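      // Treat quit() on an already-dead session as a no-op rather than failing.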
if (QUIT.equals(command.getName())) {
return new Response();
}
if (!GET_ALL_SESSIONS.equals(command.getName())
&& !NEW_SESSION.equals(command.getName())) {
throw new SessionNotFoundException("Session ID is null");
}
}
CommandInfo info = nameToUrl.get(command.getName());
try {
HttpUriRequest httpMethod = info.getMethod(remoteServer, command);
setAcceptHeader(httpMethod);
if (httpMethod instanceof HttpPost) {
String payload = new BeanToJsonConverter().convert(command.getParameters());
((HttpPost) httpMethod).setEntity(new StringEntity(payload, "utf-8"));
httpMethod.addHeader("Content-Type", "application/json; charset=utf-8");
}
// Do not allow web proxy caches to cache responses to "get" commands
if (httpMethod instanceof HttpGet) {
httpMethod.addHeader("Cache-Control", "no-cache");
}
log(LogType.PROFILER, new HttpProfilerLogEntry(command.getName(), true));
HttpResponse response = fallBackExecute(context, httpMethod);
log(LogType.PROFILER, new HttpProfilerLogEntry(command.getName(), false));
response = followRedirects(client, context, response, /* redirect count */0);
final EntityWithEncoding entityWithEncoding = new EntityWithEncoding(response.getEntity());
return createResponse(response, context, entityWithEncoding);
} catch (UnsupportedCommandException e) {
if (e.getMessage() == null || "".equals(e.getMessage())) {
throw new UnsupportedOperationException(
"No information from server. Command name was: " + command.getName(),
e.getCause());
}
throw e;
}
}
private HttpResponse fallBackExecute(HttpContext context, HttpUriRequest httpMethod)
throws IOException {
try {
return client.execute(targetHost, httpMethod, context);
} catch (BindException e) {
// If we get this, there's a chance we've used all the local ephemeral sockets
// Sleep for a bit to let the OS reclaim them, then try the request again.
try {
Thread.sleep(2000);
} catch (InterruptedException ie) {
throw Throwables.propagate(ie);
}
} catch (NoHttpResponseException e) {
// If we get this, there's a chance we've used all the remote ephemeral sockets
// Sleep for a bit to let the OS reclaim them, then try the request again.
try {
Thread.sleep(2000);
} catch (InterruptedException ie) {
throw Throwables.propagate(ie);
}
}
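    // Retry the request once after the back-off above.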
return client.execute(targetHost, httpMethod, context);
}
private void setAcceptHeader(HttpUriRequest httpMethod) {
httpMethod.addHeader("Accept", "application/json, image/png");
}
private HttpResponse followRedirects(
HttpClient client, HttpContext context, HttpResponse response, int redirectCount) {
if (!isRedirect(response)) {
return response;
}
try {
// Make sure that the previous connection is freed.
HttpEntity httpEntity = response.getEntity();
if (httpEntity != null) {
EntityUtils.consume(httpEntity);
}
} catch (IOException e) {
throw new WebDriverException(e);
}
if (redirectCount > MAX_REDIRECTS) {
throw new WebDriverException("Maximum number of redirects exceeded. Aborting");
}
String location = response.getFirstHeader("location").getValue();
URI uri;
try {
uri = buildUri(context, location);
HttpGet get = new HttpGet(uri);
setAcceptHeader(get);
HttpResponse newResponse = client.execute(targetHost, get, context);
return followRedirects(client, context, newResponse, redirectCount + 1);
} catch (URISyntaxException e) {
throw new WebDriverException(e);
} catch (ClientProtocolException e) {
throw new WebDriverException(e);
} catch (IOException e) {
throw new WebDriverException(e);
}
}
private URI buildUri(HttpContext context, String location) throws URISyntaxException {
URI uri;
uri = new URI(location);
if (!uri.isAbsolute()) {
HttpHost host = (HttpHost) context.getAttribute(HTTP_TARGET_HOST);
uri = new URI(host.toURI() + location);
}
return uri;
}
private boolean isRedirect(HttpResponse response) {
int code = response.getStatusLine().getStatusCode();
return (code == 301 || code == 302 || code == 303 || code == 307)
&& response.containsHeader("location");
}
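  // Buffers the response entity and its character set up front so the
  // underlying connection can be released before the body is consumed.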
class EntityWithEncoding {
private final String charSet;
private final byte[] content;
EntityWithEncoding(HttpEntity entity) throws IOException {
try {
if (entity != null) {
content = EntityUtils.toByteArray(entity);
Charset entityCharset = ContentType.getOrDefault(entity).getCharset();
charSet = entityCharset != null ? entityCharset.name() : null;
} else {
content = new byte[0];
charSet = null;
}
} finally {
EntityUtils.consume(entity);
}
}
public String getContentString()
throws UnsupportedEncodingException {
return new String(content, charSet != null ? charSet : "utf-8");
}
public byte[] getContent() {
return content;
}
public boolean hasEntityContent() {
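      // NOTE: content is never null here (an absent entity yields a zero-length
      // array), so this currently always returns true.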
return content != null;
}
}
private Response createResponse(HttpResponse httpResponse, HttpContext context,
EntityWithEncoding entityWithEncoding) throws IOException {
final Response response;
Header header = httpResponse.getFirstHeader("Content-Type");
if (header != null && header.getValue().startsWith("application/json")) {
String responseAsText = entityWithEncoding.getContentString();
try {
response = new JsonToBeanConverter().convert(Response.class, responseAsText);
} catch (ClassCastException e) {
if (responseAsText != null && "".equals(responseAsText)) {
// The remote server has died, but has already set some headers.
// Normally this occurs when the final window of the firefox driver
        // is closed on OS X. Return null, as the return value _should_ be
        // ignored. This is not an elegant solution.
return null;
}
throw new WebDriverException("Cannot convert text to response: " + responseAsText, e);
}
} else {
response = new Response();
if (header != null && header.getValue().startsWith("image/png")) {
response.setValue(entityWithEncoding.getContent());
} else if (entityWithEncoding.hasEntityContent()) {
response.setValue(entityWithEncoding.getContentString());
}
HttpHost finalHost = (HttpHost) context.getAttribute(HTTP_TARGET_HOST);
String uri = finalHost.toURI();
String sessionId = HttpSessionId.getSessionId(uri);
if (sessionId != null) {
response.setSessionId(sessionId);
}
int statusCode = httpResponse.getStatusLine().getStatusCode();
if (!(statusCode > 199 && statusCode < 300)) {
// 4xx represents an unknown command or a bad request.
if (statusCode > 399 && statusCode < 500) {
response.setStatus(ErrorCodes.UNKNOWN_COMMAND);
} else if (statusCode > 499 && statusCode < 600) {
// 5xx represents an internal server error. The response status should already be set, but
// if not, set it to a general error code.
if (response.getStatus() == ErrorCodes.SUCCESS) {
response.setStatus(ErrorCodes.UNHANDLED_ERROR);
}
} else {
response.setStatus(ErrorCodes.UNHANDLED_ERROR);
}
}
if (response.getValue() instanceof String) {
// We normalise to \n because Java will translate this to \r\n
// if this is suitable on our platform, and if we have \r\n, java will
// turn this into \r\r\n, which would be Bad!
response.setValue(((String) response.getValue()).replace("\r\n", "\n"));
}
}
response.setState(errorCodes.toState(response.getStatus()));
return response;
}
private static CommandInfo get(String url) {
return new CommandInfo(url, HttpVerb.GET);
}
private static CommandInfo post(String url) {
return new CommandInfo(url, HttpVerb.POST);
}
private static CommandInfo delete(String url) {
return new CommandInfo(url, HttpVerb.DELETE);
}
}
| 1 | 10,691 | It would be better to just change RWD to throw IllegalStateException if you attempt to execute a command after quit (unless it's a second call to quit()) | SeleniumHQ-selenium | rb |
@@ -130,13 +130,13 @@ func NewExportPipeline(config Config, options ...controller.Option) (*Exporter,
// InstallNewPipeline instantiates a NewExportPipeline and registers it globally.
// Typically called as:
//
-// hf, err := prometheus.InstallNewPipeline(prometheus.Config{...})
+// exporter, err := prometheus.InstallNewPipeline(prometheus.Config{...})
//
// if err != nil {
// ...
// }
-// http.HandleFunc("/metrics", hf)
-// defer pipeline.Stop()
+// http.Handle("/metrics", exporter)
+// defer exporter.Controller().Stop(context.TODO())
// ... Done
func InstallNewPipeline(config Config, options ...controller.Option) (*Exporter, error) {
exp, err := NewExportPipeline(config, options...) | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus // import "go.opentelemetry.io/otel/exporters/metric/prometheus"
// Note that this package does not support a way to export Prometheus
// Summary data points, removed in PR#1412.
import (
"context"
"fmt"
"net/http"
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/global"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
selector "go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
// Exporter supports Prometheus pulls. It does not implement the
// sdk/export/metric.Exporter interface--instead it creates a pull
// controller and reads the latest checkpointed data on-scrape.
type Exporter struct {
handler http.Handler
registerer prometheus.Registerer
gatherer prometheus.Gatherer
// lock protects access to the controller. The controller
// exposes its own lock, but using a dedicated lock in this
// struct allows the exporter to potentially support multiple
// controllers (e.g., with different resources).
lock sync.RWMutex
controller *controller.Controller
defaultHistogramBoundaries []float64
}
// ErrUnsupportedAggregator is returned for unrepresentable aggregator
// types (e.g., exact).
var ErrUnsupportedAggregator = fmt.Errorf("unsupported aggregator type")
var _ http.Handler = &Exporter{}
// Config is a set of configs for the Prometheus exporter.
type Config struct {
// Registry is the prometheus registry that will be used as the default Registerer and
// Gatherer if these are not specified.
//
// If not set a new empty Registry is created.
Registry *prometheus.Registry
// Registerer is the prometheus registerer to register
// metrics with.
//
// If not specified the Registry will be used as default.
Registerer prometheus.Registerer
// Gatherer is the prometheus gatherer to gather
// metrics with.
//
// If not specified the Registry will be used as default.
Gatherer prometheus.Gatherer
// DefaultHistogramBoundaries defines the default histogram bucket
// boundaries.
DefaultHistogramBoundaries []float64
}
// NewExporter returns a new Prometheus exporter using the configured
// metric controller. See controller.New().
func NewExporter(config Config, controller *controller.Controller) (*Exporter, error) {
if config.Registry == nil {
config.Registry = prometheus.NewRegistry()
}
if config.Registerer == nil {
config.Registerer = config.Registry
}
if config.Gatherer == nil {
config.Gatherer = config.Registry
}
e := &Exporter{
handler: promhttp.HandlerFor(config.Gatherer, promhttp.HandlerOpts{}),
registerer: config.Registerer,
gatherer: config.Gatherer,
controller: controller,
defaultHistogramBoundaries: config.DefaultHistogramBoundaries,
}
c := &collector{
exp: e,
}
if err := config.Registerer.Register(c); err != nil {
return nil, fmt.Errorf("cannot register the collector: %w", err)
}
return e, nil
}
// NewExportPipeline sets up a complete export pipeline with the recommended setup,
// using the recommended selector and standard processor. See the controller.Options.
func NewExportPipeline(config Config, options ...controller.Option) (*Exporter, error) {
return NewExporter(config, defaultController(config, options...))
}
// InstallNewPipeline instantiates a NewExportPipeline and registers it globally.
// Typically called as:
//
// hf, err := prometheus.InstallNewPipeline(prometheus.Config{...})
//
// if err != nil {
// ...
// }
// http.HandleFunc("/metrics", hf)
// defer pipeline.Stop()
// ... Done
func InstallNewPipeline(config Config, options ...controller.Option) (*Exporter, error) {
exp, err := NewExportPipeline(config, options...)
if err != nil {
return nil, err
}
global.SetMeterProvider(exp.MeterProvider())
return exp, nil
}
// defaultController returns a standard *controller.Controller for use
// with Prometheus.
func defaultController(config Config, options ...controller.Option) *controller.Controller {
return controller.New(
processor.New(
selector.NewWithHistogramDistribution(
histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
),
export.CumulativeExportKindSelector(),
processor.WithMemory(true),
),
options...,
)
}
// MeterProvider returns the MeterProvider of this exporter.
func (e *Exporter) MeterProvider() metric.MeterProvider {
return e.controller.MeterProvider()
}
// Controller returns the controller object that coordinates collection for the SDK.
func (e *Exporter) Controller() *controller.Controller {
e.lock.RLock()
defer e.lock.RUnlock()
return e.controller
}
// ExportKindFor implements ExportKindSelector.
func (e *Exporter) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) export.ExportKind {
return export.CumulativeExportKindSelector().ExportKindFor(desc, kind)
}
// ServeHTTP implements http.Handler.
func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
e.handler.ServeHTTP(w, r)
}
// collector implements prometheus.Collector interface.
type collector struct {
exp *Exporter
}
var _ prometheus.Collector = (*collector)(nil)
// Describe implements prometheus.Collector.
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
c.exp.lock.RLock()
defer c.exp.lock.RUnlock()
_ = c.exp.Controller().ForEach(c.exp, func(record export.Record) error {
var labelKeys []string
mergeLabels(record, &labelKeys, nil)
ch <- c.toDesc(record, labelKeys)
return nil
})
}
// Collect exports the last calculated CheckpointSet.
//
// Collect is invoked whenever prometheus.Gatherer is also invoked.
// For example, when the HTTP endpoint is invoked by Prometheus.
func (c *collector) Collect(ch chan<- prometheus.Metric) {
c.exp.lock.RLock()
defer c.exp.lock.RUnlock()
ctrl := c.exp.Controller()
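	// Run a collection pass first so this scrape reports the latest checkpointed values.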
if err := ctrl.Collect(context.Background()); err != nil {
otel.Handle(err)
}
err := ctrl.ForEach(c.exp, func(record export.Record) error {
agg := record.Aggregation()
numberKind := record.Descriptor().NumberKind()
instrumentKind := record.Descriptor().InstrumentKind()
var labelKeys, labels []string
mergeLabels(record, &labelKeys, &labels)
desc := c.toDesc(record, labelKeys)
if hist, ok := agg.(aggregation.Histogram); ok {
if err := c.exportHistogram(ch, hist, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting histogram: %w", err)
}
} else if sum, ok := agg.(aggregation.Sum); ok && instrumentKind.Monotonic() {
if err := c.exportMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting monotonic counter: %w", err)
}
} else if sum, ok := agg.(aggregation.Sum); ok && !instrumentKind.Monotonic() {
if err := c.exportNonMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting non monotonic counter: %w", err)
}
} else if lastValue, ok := agg.(aggregation.LastValue); ok {
if err := c.exportLastValue(ch, lastValue, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting last value: %w", err)
}
} else {
return fmt.Errorf("%w: %s", ErrUnsupportedAggregator, agg.Kind())
}
return nil
})
if err != nil {
otel.Handle(err)
}
}
func (c *collector) exportLastValue(ch chan<- prometheus.Metric, lvagg aggregation.LastValue, kind number.Kind, desc *prometheus.Desc, labels []string) error {
lv, _, err := lvagg.LastValue()
if err != nil {
return fmt.Errorf("error retrieving last value: %w", err)
}
m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, lv.CoerceToFloat64(kind), labels...)
if err != nil {
return fmt.Errorf("error creating constant metric: %w", err)
}
ch <- m
return nil
}
func (c *collector) exportNonMonotonicCounter(ch chan<- prometheus.Metric, sum aggregation.Sum, kind number.Kind, desc *prometheus.Desc, labels []string) error {
v, err := sum.Sum()
if err != nil {
return fmt.Errorf("error retrieving counter: %w", err)
}
m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, v.CoerceToFloat64(kind), labels...)
if err != nil {
return fmt.Errorf("error creating constant metric: %w", err)
}
ch <- m
return nil
}
func (c *collector) exportMonotonicCounter(ch chan<- prometheus.Metric, sum aggregation.Sum, kind number.Kind, desc *prometheus.Desc, labels []string) error {
v, err := sum.Sum()
if err != nil {
return fmt.Errorf("error retrieving counter: %w", err)
}
m, err := prometheus.NewConstMetric(desc, prometheus.CounterValue, v.CoerceToFloat64(kind), labels...)
if err != nil {
return fmt.Errorf("error creating constant metric: %w", err)
}
ch <- m
return nil
}
func (c *collector) exportHistogram(ch chan<- prometheus.Metric, hist aggregation.Histogram, kind number.Kind, desc *prometheus.Desc, labels []string) error {
buckets, err := hist.Histogram()
if err != nil {
return fmt.Errorf("error retrieving histogram: %w", err)
}
sum, err := hist.Sum()
if err != nil {
return fmt.Errorf("error retrieving sum: %w", err)
}
var totalCount uint64
// counts maps from the bucket upper-bound to the cumulative count.
// The bucket with upper-bound +inf is not included.
counts := make(map[float64]uint64, len(buckets.Boundaries))
for i := range buckets.Boundaries {
boundary := buckets.Boundaries[i]
totalCount += uint64(buckets.Counts[i])
counts[boundary] = totalCount
}
// Include the +inf bucket in the total count.
totalCount += uint64(buckets.Counts[len(buckets.Counts)-1])
m, err := prometheus.NewConstHistogram(desc, totalCount, sum.CoerceToFloat64(kind), counts, labels...)
if err != nil {
return fmt.Errorf("error creating constant histogram: %w", err)
}
ch <- m
return nil
}
func (c *collector) toDesc(record export.Record, labelKeys []string) *prometheus.Desc {
desc := record.Descriptor()
return prometheus.NewDesc(sanitize(desc.Name()), desc.Description(), labelKeys, nil)
}
// mergeLabels merges the export.Record's labels and resources into a
// single set, giving precedence to the record's labels in case of
// duplicate keys. This outputs one or both of the keys and the
// values as a slice, and either argument may be nil to avoid
// allocating an unnecessary slice.
func mergeLabels(record export.Record, keys, values *[]string) {
if keys != nil {
*keys = make([]string, 0, record.Labels().Len()+record.Resource().Len())
}
if values != nil {
*values = make([]string, 0, record.Labels().Len()+record.Resource().Len())
}
// Duplicate keys are resolved by taking the record label value over
// the resource value.
mi := attribute.NewMergeIterator(record.Labels(), record.Resource().Set())
for mi.Next() {
label := mi.Label()
if keys != nil {
*keys = append(*keys, sanitize(string(label.Key)))
}
if values != nil {
*values = append(*values, label.Value.Emit())
}
}
}
| 1 | 14,843 | Could we move this to an example test to allow the compiler to help us ensure it stays up-to-date in the future? | open-telemetry-opentelemetry-go | go |
@@ -380,14 +380,6 @@ describe "Bolt::CLI" do
cli = Bolt::CLI.new(%w[command run uptime --password opensesame --nodes foo])
expect(cli.parse).to include(password: 'opensesame')
end
-
- it "prompts the user for password if not specified" do
- allow(STDIN).to receive(:noecho).and_return('opensesame')
- allow(STDOUT).to receive(:print).with('Please enter your password: ')
- allow(STDOUT).to receive(:puts)
- cli = Bolt::CLI.new(%w[command run uptime --nodes foo --password])
- expect(cli.parse).to include(password: 'opensesame')
- end
end
describe "key" do | 1 | # frozen_string_literal: true
require 'spec_helper'
require 'bolt_spec/files'
require 'bolt_spec/task'
require 'bolt/cli'
require 'bolt/util'
require 'concurrent/utility/processor_counter'
require 'r10k/action/puppetfile/install'
require 'yaml'
describe "Bolt::CLI" do
include BoltSpec::Files
include BoltSpec::Task
let(:target) { Bolt::Target.new('foo') }
before(:each) do
outputter = Bolt::Outputter::Human.new(false, false, false, StringIO.new)
allow_any_instance_of(Bolt::CLI).to receive(:outputter).and_return(outputter)
allow_any_instance_of(Bolt::CLI).to receive(:warn)
# Don't allow tests to override the captured log config
allow(Bolt::Logger).to receive(:configure)
Logging.logger[:root].level = :info
end
def stub_file(path)
stat = double('stat', readable?: true, file?: true, directory?: false)
allow(Bolt::Util).to receive(:file_stat).with(path).and_return(stat)
end
def stub_non_existent_file(path)
allow(Bolt::Util).to receive(:file_stat).with(path).and_raise(
Errno::ENOENT, "No such file or directory @ rb_file_s_stat - #{path}"
)
end
def stub_unreadable_file(path)
stat = double('stat', readable?: false, file?: true)
allow(Bolt::Util).to receive(:file_stat).with(path).and_return(stat)
end
def stub_directory(path)
stat = double('stat', readable?: true, file?: false, directory?: true)
allow(Bolt::Util).to receive(:file_stat).with(path).and_return(stat)
end
def stub_config(file_content = {})
allow(Bolt::Util).to receive(:read_config_file).and_return(file_content)
end
context "without a config file" do
let(:boltdir) { Bolt::Boltdir.new('.') }
before(:each) do
allow(Bolt::Boltdir).to receive(:find_boltdir).and_return(boltdir)
allow_any_instance_of(Bolt::Boltdir).to receive(:resource_types)
allow(Bolt::Util).to receive(:read_config_file).and_return({})
end
it "generates an error message if an unknown argument is given" do
cli = Bolt::CLI.new(%w[command run --unknown])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Unknown argument '--unknown'/)
end
it "generates an error message if an unknown subcommand is given" do
cli = Bolt::CLI.new(%w[-n bolt1 bolt2 command run whoami])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Expected subcommand 'bolt2' to be one of/)
end
it "generates an error message if an unknown action is given" do
cli = Bolt::CLI.new(%w[-n bolt1 command oops whoami])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Expected action 'oops' to be one of/)
end
it "generates an error message is no action is given and one is expected" do
cli = Bolt::CLI.new(%w[-n bolt1 command])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Expected an action/)
end
it "works without an action if no action is expected" do
cli = Bolt::CLI.new(%w[-n bolt1 apply file.pp])
expect {
cli.parse
}.not_to raise_error
end
describe "help" do
it "generates help when no arguments are specified" do
cli = Bolt::CLI.new([])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Usage: bolt/).to_stdout
end
it "accepts --help" do
cli = Bolt::CLI.new(%w[--help])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Usage: bolt/).to_stdout
end
context 'listing actions with help' do
it 'accepts command' do
cli = Bolt::CLI.new(%w[help command])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Available actions are:.*run/m).to_stdout
end
it 'accepts script' do
cli = Bolt::CLI.new(%w[help script])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Available actions are:.*run/m).to_stdout
end
it 'accepts task' do
cli = Bolt::CLI.new(%w[help task])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Available actions are:.*show.*run/m).to_stdout
end
it 'accepts plan' do
cli = Bolt::CLI.new(%w[help plan])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Available actions are:.*show.*run/m).to_stdout
end
it 'accepts file' do
cli = Bolt::CLI.new(%w[help file])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Available actions are:.*upload/m).to_stdout
end
it 'accepts puppetfile' do
cli = Bolt::CLI.new(%w[help puppetfile])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Available actions are:.*install.*show-modules/m).to_stdout
end
it 'accepts inventory' do
cli = Bolt::CLI.new(%w[help inventory])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Available actions are:.*show/m).to_stdout
end
it 'excludes invalid subcommand flags' do
cli = Bolt::CLI.new(%w[help puppetfile])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.not_to output(/--private-key/).to_stdout
end
it 'excludes invalid subcommand action flags and help text' do
cli = Bolt::CLI.new(%w[help plan show])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.not_to output(/[parameters].*nodes/m).to_stdout
end
it 'accepts apply' do
cli = Bolt::CLI.new(%w[help apply])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/Usage: bolt apply <manifest.pp>/m).to_stdout
end
end
end
describe "version" do
it "emits a version string" do
cli = Bolt::CLI.new(%w[--version])
expect {
expect {
cli.parse
}.to raise_error(Bolt::CLIExit)
}.to output(/\d+\.\d+\.\d+/).to_stdout
end
end
describe "nodes" do
let(:targets) { [target, Bolt::Target.new('bar')] }
it "accepts a single node" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo])
expect(cli.parse).to include(targets: [target])
end
it "accepts multiple nodes" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo,bar])
expect(cli.parse).to include(targets: targets)
end
it "accepts multiple nodes across multiple declarations" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo,bar --nodes bar,more,bars])
extra_targets = [Bolt::Target.new('more'), Bolt::Target.new('bars')]
expect(cli.parse).to include(targets: targets + extra_targets)
end
it "reads from stdin when --nodes is '-'" do
nodes = <<~'NODES'
foo
bar
NODES
cli = Bolt::CLI.new(%w[command run uptime --nodes -])
allow(STDIN).to receive(:read).and_return(nodes)
result = cli.parse
expect(result[:targets]).to eq(targets)
end
it "reads from a file when --nodes starts with @" do
nodes = <<~'NODES'
foo
bar
NODES
with_tempfile_containing('nodes-args', nodes) do |file|
cli = Bolt::CLI.new(%W[command run uptime --nodes @#{file.path}])
result = cli.parse
expect(result[:targets]).to eq(targets)
end
end
it "strips leading and trailing whitespace" do
nodes = " foo\nbar \nbaz\nqux "
with_tempfile_containing('nodes-args', nodes) do |file|
cli = Bolt::CLI.new(%W[command run uptime --nodes @#{file.path}])
result = cli.parse
extra_targets = [Bolt::Target.new('baz'), Bolt::Target.new('qux')]
expect(result[:targets]).to eq(targets + extra_targets)
end
end
it "expands tilde to a user directory when --nodes starts with @" do
expect(File).to receive(:read).with(File.join(Dir.home, 'nodes.txt')).and_return("foo\nbar\n")
cli = Bolt::CLI.new(%w[command run uptime --nodes @~/nodes.txt])
allow(cli).to receive(:puppetdb_client)
result = cli.parse
expect(result[:targets]).to eq(targets)
end
it "generates an error message if no nodes given" do
cli = Bolt::CLI.new(%w[command run uptime --nodes])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Option '--nodes' needs a parameter/)
end
it "generates an error message if nodes is omitted" do
cli = Bolt::CLI.new(%w[command run uptime])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Command requires a targeting option/)
end
end
describe "targets" do
let(:targets) { [target, Bolt::Target.new('bar')] }
it "reads from a file when --nodes starts with @" do
nodes = <<~'NODES'
foo
bar
NODES
with_tempfile_containing('nodes-args', nodes) do |file|
cli = Bolt::CLI.new(%W[command run uptime --targets @#{file.path}])
result = cli.parse
expect(result[:targets]).to eq(targets)
end
end
it "generates an error message if no targets are given" do
cli = Bolt::CLI.new(%w[command run uptime --targets])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Option '--targets' needs a parameter/)
end
it "generates an error if nodes and targets are specified" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo --targets bar])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Only one targeting option/)
end
end
describe "query" do
it "accepts a query" do
cli = Bolt::CLI.new(%w[command run id --query nodes{}])
allow(cli).to receive(:query_puppetdb_nodes).and_return([])
result = cli.parse
expect(result[:query]).to eq('nodes{}')
end
it "resolves targets based on the query" do
cli = Bolt::CLI.new(%w[command run id --query nodes{}])
allow(cli).to receive(:query_puppetdb_nodes).and_return(%w[foo bar])
targets = [Bolt::Target.new('foo'), Bolt::Target.new('bar')]
result = cli.parse
expect(result[:targets]).to eq(targets)
end
it "fails if it can't retrieve targets from PuppetDB" do
cli = Bolt::CLI.new(%w[command run id --query nodes{}])
puppetdb = double('puppetdb')
allow(puppetdb).to receive(:query_certnames).and_raise(Bolt::PuppetDBError, "failed to puppetdb the nodes")
allow(cli).to receive(:puppetdb_client).and_return(puppetdb)
expect { cli.parse }
.to raise_error(Bolt::PuppetDBError, /failed to puppetdb the nodes/)
end
it "fails if both --nodes and --query are specified" do
cli = Bolt::CLI.new(%w[command run id --query nodes{} --nodes foo,bar])
expect { cli.parse }.to raise_error(Bolt::CLIError, /Only one/)
end
end
describe "user" do
it "accepts a user" do
cli = Bolt::CLI.new(%w[command run uptime --user root --nodes foo])
expect(cli.parse).to include(user: 'root')
end
it "generates an error message if no user value is given" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo --user])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Option '--user' needs a parameter/)
end
end
describe "password" do
it "accepts a password" do
cli = Bolt::CLI.new(%w[command run uptime --password opensesame --nodes foo])
expect(cli.parse).to include(password: 'opensesame')
end
it "prompts the user for password if not specified" do
allow(STDIN).to receive(:noecho).and_return('opensesame')
allow(STDOUT).to receive(:print).with('Please enter your password: ')
allow(STDOUT).to receive(:puts)
cli = Bolt::CLI.new(%w[command run uptime --nodes foo --password])
expect(cli.parse).to include(password: 'opensesame')
end
end
describe "key" do
it "accepts a private key" do
cli = Bolt::CLI.new(%w[ command run uptime
--private-key ~/.ssh/google_compute_engine
--nodes foo])
expect(cli.parse).to include('private-key': '~/.ssh/google_compute_engine')
expect(cli.config.transports[:ssh]['private-key']).to eq('~/.ssh/google_compute_engine')
end
it "generates an error message if no key value is given" do
cli = Bolt::CLI.new(%w[command run --nodes foo --private-key])
expect {
cli.parse
}.to raise_error(Bolt::CLIError,
/Option '--private-key' needs a parameter/)
end
end
describe "concurrency" do
it "accepts a concurrency limit" do
cli = Bolt::CLI.new(%w[command run uptime --concurrency 10 --nodes foo])
expect(cli.parse).to include(concurrency: 10)
end
it "defaults to 100" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo])
cli.parse
expect(cli.config.concurrency).to eq(100)
end
it "generates an error message if no concurrency value is given" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo --concurrency])
expect {
cli.parse
}.to raise_error(Bolt::CLIError,
/Option '--concurrency' needs a parameter/)
end
end
describe "compile-concurrency" do
it "accepts a concurrency limit" do
cli = Bolt::CLI.new(%w[command run uptime --compile-concurrency 2 --nodes foo])
expect(cli.parse).to include('compile-concurrency': 2)
end
it "defaults to unset" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo])
cli.parse
# verifies Etc.nprocessors is the same as Concurrent.processor_count
expect(cli.config.compile_concurrency).to eq(Concurrent.processor_count)
end
it "generates an error message if no concurrency value is given" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo --compile-concurrency])
expect {
cli.parse
}.to raise_error(Bolt::CLIError,
/Option '--compile-concurrency' needs a parameter/)
end
end
describe "console log level" do
it "is not sensitive to ordering of debug and verbose" do
expect(Bolt::Logger).to receive(:configure).with({ 'console' => { level: :debug } }, true)
cli = Bolt::CLI.new(%w[command run uptime --nodes foo --debug --verbose])
cli.parse
end
end
describe "host-key-check" do
it "accepts `--host-key-check`" do
cli = Bolt::CLI.new(%w[command run uptime --host-key-check --nodes foo])
cli.parse
expect(cli.config.transports[:ssh]['host-key-check']).to eq(true)
end
it "accepts `--no-host-key-check`" do
cli = Bolt::CLI.new(%w[command run uptime --no-host-key-check --nodes foo])
cli.parse
expect(cli.config.transports[:ssh]['host-key-check']).to eq(false)
end
it "defaults to nil" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo])
cli.parse
expect(cli.config.transports[:ssh]['host-key-check']).to eq(nil)
end
end
describe "connect-timeout" do
it "accepts a specific timeout" do
cli = Bolt::CLI.new(%w[command run uptime --connect-timeout 123 --nodes foo])
expect(cli.parse).to include('connect-timeout': 123)
end
it "generates an error message if no timeout value is given" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo --connect-timeout])
expect {
cli.parse
}.to raise_error(Bolt::CLIError,
/Option '--connect-timeout' needs a parameter/)
end
end
describe "modulepath" do
it "treats relative modulepath as relative to pwd" do
site = File.expand_path('site')
modulepath = [site, 'modules'].join(File::PATH_SEPARATOR)
cli = Bolt::CLI.new(%W[command run uptime --modulepath #{modulepath} --nodes foo])
expect(cli.parse).to include(modulepath: [site, File.expand_path('modules')])
end
it "accepts shorthand -m" do
site = File.expand_path('site')
modulepath = [site, 'modules'].join(File::PATH_SEPARATOR)
cli = Bolt::CLI.new(%W[command run uptime -m #{modulepath} --nodes foo])
expect(cli.parse).to include(modulepath: [site, File.expand_path('modules')])
end
it "generates an error message if no value is given" do
cli = Bolt::CLI.new(%w[command run uptime --nodes foo --modulepath])
expect {
cli.parse
}.to raise_error(Bolt::CLIError,
/Option '--modulepath' needs a parameter/)
end
end
describe "sudo" do
it "supports running as a user" do
cli = Bolt::CLI.new(%w[command run --nodes foo whoami --run-as root])
expect(cli.parse[:'run-as']).to eq('root')
end
end
describe "sudo-password" do
it "accepts a password" do
cli = Bolt::CLI.new(%w[command run uptime --sudo-password opensez --run-as alibaba --nodes foo])
expect(cli.parse).to include('sudo-password': 'opensez')
end
it "prompts the user for sudo-password if not specified" do
allow(STDIN).to receive(:noecho).and_return('opensez')
pw_prompt = 'Please enter your privilege escalation password: '
allow(STDOUT).to receive(:print).with(pw_prompt)
allow(STDOUT).to receive(:puts)
cli = Bolt::CLI.new(%w[command run uptime --nodes foo --run-as alibaba --sudo-password])
expect(cli.parse).to include('sudo-password': 'opensez')
end
end
describe "transport" do
it "defaults to 'ssh'" do
cli = Bolt::CLI.new(%w[command run --nodes foo whoami])
cli.parse
expect(cli.config.transport).to eq('ssh')
end
it "accepts ssh" do
cli = Bolt::CLI.new(%w[command run --transport ssh --nodes foo id])
expect(cli.parse[:transport]).to eq('ssh')
end
it "accepts winrm" do
cli = Bolt::CLI.new(%w[command run --transport winrm --nodes foo id])
expect(cli.parse[:transport]).to eq('winrm')
end
it "accepts pcp" do
cli = Bolt::CLI.new(%w[command run --transport pcp --nodes foo id])
expect(cli.parse[:transport]).to eq('pcp')
end
it "rejects invalid transports" do
cli = Bolt::CLI.new(%w[command run --transport holodeck --nodes foo id])
expect {
cli.parse
}.to raise_error(Bolt::CLIError,
/Invalid parameter specified for option '--transport': holodeck/)
end
end
describe "command" do
it "interprets whoami as the command" do
cli = Bolt::CLI.new(%w[command run --nodes foo whoami])
expect(cli.parse[:object]).to eq('whoami')
end
it "errors when a command is not specified" do
cli = Bolt::CLI.new(%w[command run --nodes foo])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Must specify a command to run/)
end
it "errors when a command is empty string" do
cli = Bolt::CLI.new(['command', 'run', '', '--nodes', 'foo'])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Must specify a command to run/)
end
end
it "distinguishes subcommands" do
cli = Bolt::CLI.new(%w[script run --nodes foo])
expect(cli.parse).to include(subcommand: 'script')
end
describe "file" do
describe "upload" do
it "uploads a file" do
cli = Bolt::CLI.new(%w[file upload ./src /path/dest --nodes foo])
result = cli.parse
expect(result[:object]).to eq('./src')
expect(result[:leftovers].first).to eq('/path/dest')
end
end
end
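  # The specs below cover the three ways parameters can reach a task or plan:
  # bare key=value pairs on the command line, inline JSON via --params, and
  # JSON loaded from a file (--params @file) or from stdin (--params -).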
describe "handling parameters" do
it "returns {} if none are specified" do
cli = Bolt::CLI.new(%w[plan run my::plan --modulepath .])
result = cli.parse
expect(result[:task_options]).to eq({})
end
it "reads params on the command line" do
cli = Bolt::CLI.new(%w[plan run my::plan kj=2hv iuhg=iube 2whf=lcv
--modulepath .])
result = cli.parse
expect(result[:params_parsed]).to eq(false)
expect(result[:task_options]).to eq('kj' => '2hv',
'iuhg' => 'iube',
'2whf' => 'lcv')
end
it "reads params in json with the params flag" do
json_args = '{"kj":"2hv","iuhg":"iube","2whf":"lcv"}'
cli = Bolt::CLI.new(['plan', 'run', 'my::plan', '--params', json_args,
'--modulepath', '.'])
result = cli.parse
expect(result[:params_parsed]).to eq(true)
expect(result[:task_options]).to eq('kj' => '2hv',
'iuhg' => 'iube',
'2whf' => 'lcv')
end
it "raises a cli error when json parsing fails" do
json_args = '{"k'
cli = Bolt::CLI.new(['plan', 'run', 'my::plan', '--params', json_args])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /unexpected token/)
end
it "raises a cli error when specifying params both ways" do
cli = Bolt::CLI.new(%w[plan run my::plan --params {"a":"b"} c=d
--modulepath .])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /not both/)
end
it "reads json from a file when --params starts with @" do
json_args = '{"kj":"2hv","iuhg":"iube","2whf":"lcv"}'
with_tempfile_containing('json-args', json_args) do |file|
cli = Bolt::CLI.new(%W[plan run my::plan --params @#{file.path}
--modulepath .])
result = cli.parse
expect(result[:task_options]).to eq('kj' => '2hv',
'iuhg' => 'iube',
'2whf' => 'lcv')
end
end
it "raises a cli error when reading the params file fails" do
Dir.mktmpdir do |dir|
cli = Bolt::CLI.new(%W[plan run my::plan --params @#{dir}/nope
--modulepath .])
expect {
cli.parse
}.to raise_error(Bolt::FileError, /No such file/)
end
end
it "reads json from stdin when --params is just '-'" do
json_args = '{"kj":"2hv","iuhg":"iube","2whf":"lcv"}'
cli = Bolt::CLI.new(%w[plan run my::plan --params - --modulepath .])
allow(STDIN).to receive(:read).and_return(json_args)
result = cli.parse
expect(result[:task_options]).to eq('kj' => '2hv',
'iuhg' => 'iube',
'2whf' => 'lcv')
end
end
describe 'task' do
it "errors without a task" do
cli = Bolt::CLI.new(%w[task run -n example.com --modulepath .])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Must specify/)
end
it "errors if task is a parameter" do
cli = Bolt::CLI.new(%w[task run -n example.com --modulepath . p1=v1])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Invalid task/)
end
it "fails show with --noop" do
expected = "Option '--noop' may only be specified when running a task or applying manifest code"
expect {
cli = Bolt::CLI.new(%w[task show foo --nodes bar --noop])
cli.parse
}.to raise_error(Bolt::CLIError, expected)
end
end
describe 'plan' do
it "errors without a plan" do
cli = Bolt::CLI.new(%w[plan run --modulepath . nodes=example.com])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Invalid plan/)
end
it "errors if plan is a parameter" do
cli = Bolt::CLI.new(%w[plan run nodes=example.com --modulepath . p1=v1])
expect {
cli.parse
}.to raise_error(Bolt::CLIError, /Invalid plan/)
end
it "accepts targets resulting from --query from puppetdb" do
cli = Bolt::CLI.new(%w[plan run foo --query nodes{}])
allow(cli).to receive(:query_puppetdb_nodes).once.and_return(%w[foo bar])
targets = [Bolt::Target.new('foo'), Bolt::Target.new('bar')]
result = cli.parse
cli.validate(result)
cli.execute(result)
expect(result[:targets]).to eq(targets)
expect(result[:target_args]).to eq(%w[foo bar])
end
it "fails when --nodes AND --query provided" do
expect {
cli = Bolt::CLI.new(%w[plan run foo --query nodes{} --nodes bar])
cli.parse
}.to raise_error(Bolt::CLIError, /Only one targeting option/)
end
it "fails with --noop" do
expected = "Option '--noop' may only be specified when running a task or applying manifest code"
expect {
cli = Bolt::CLI.new(%w[plan run foo --nodes bar --noop])
cli.parse
}.to raise_error(Bolt::CLIError, expected)
end
end
describe 'apply' do
it "errors without an object or inline code" do
expect {
cli = Bolt::CLI.new(%w[apply --nodes bar])
cli.parse
}.to raise_error(Bolt::CLIError, 'a manifest file or --execute is required')
end
it "errors with both an object and inline code" do
expect {
cli = Bolt::CLI.new(%w[apply foo.pp --execute hello --nodes bar])
cli.parse
}.to raise_error(Bolt::CLIError, '--execute is unsupported when specifying a manifest file')
end
end
describe "bundled_content" do
let(:empty_content) {
{ "Plan" => [],
"Plugin" => Bolt::Plugin::BUILTIN_PLUGINS,
"Task" => [] }
}
it "does not calculate bundled content for a command" do
cli = Bolt::CLI.new(%w[command run foo --nodes bar])
cli.parse
expect(cli.bundled_content).to eq(empty_content)
end
it "does not calculate bundled content for a script" do
cli = Bolt::CLI.new(%w[script run foo --nodes bar])
cli.parse
expect(cli.bundled_content).to eq(empty_content)
end
it "does not calculate bundled content for a file" do
cli = Bolt::CLI.new(%w[file upload /tmp /var foo --nodes bar])
cli.parse
expect(cli.bundled_content).to eq(empty_content)
end
it "calculates bundled content for a task" do
cli = Bolt::CLI.new(%w[task run foo --nodes bar])
cli.parse
expect(cli.bundled_content['Task']).not_to be_empty
end
it "calculates bundled content for a plan" do
cli = Bolt::CLI.new(%w[plan run foo --nodes bar])
cli.parse
expect(cli.bundled_content['Plan']).not_to be_empty
expect(cli.bundled_content['Task']).not_to be_empty
end
end
describe "execute" do
let(:executor) { double('executor', noop: false, subscribe: nil, shutdown: nil) }
let(:cli) { Bolt::CLI.new({}) }
let(:targets) { [target] }
let(:output) { StringIO.new }
let(:result_vals) { [{}] }
let(:fail_vals) { [{ '_error' => {} }] }
let(:result_set) do
results = targets.zip(result_vals).map do |t, r|
Bolt::Result.new(t, value: r)
end
Bolt::ResultSet.new(results)
end
let(:fail_set) do
results = targets.zip(fail_vals).map do |t, r|
Bolt::Result.new(t, value: r)
end
Bolt::ResultSet.new(results)
end
before :each do
allow(Bolt::Executor).to receive(:new).and_return(executor)
allow(executor).to receive(:log_plan) { |_plan_name, &block| block.call }
outputter = Bolt::Outputter::JSON.new(false, false, false, output)
allow(cli).to receive(:outputter).and_return(outputter)
end
context 'when running a command' do
let(:options) {
{
targets: targets,
subcommand: 'command',
action: 'run',
object: 'whoami'
}
}
it "executes the 'whoami' command" do
expect(executor)
.to receive(:run_command)
.with(targets, 'whoami', kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
expect(cli.execute(options)).to eq(0)
expect(JSON.parse(output.string)).to be
end
it "returns 2 if any node fails" do
expect(executor)
.to receive(:run_command)
.with(targets, 'whoami', kind_of(Hash))
.and_return(fail_set)
expect(cli.execute(options)).to eq(2)
end
end
context "when running a script" do
let(:script) { 'bar.sh' }
let(:options) {
{ targets: targets, subcommand: 'script', action: 'run', object: script,
leftovers: [] }
}
it "runs a script" do
stub_file(script)
expect(executor)
.to receive(:run_script)
.with(targets, script, [], kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
expect(cli.execute(options)).to eq(0)
expect(JSON.parse(output.string)).to be
end
it "errors for non-existent scripts" do
stub_non_existent_file(script)
expect { cli.execute(options) }.to raise_error(
Bolt::FileError, /The script '#{script}' does not exist/
)
expect(JSON.parse(output.string)).to be
end
it "errors for unreadable scripts" do
stub_unreadable_file(script)
expect { cli.execute(options) }.to raise_error(
Bolt::FileError, /The script '#{script}' is unreadable/
)
expect(JSON.parse(output.string)).to be
end
it "errors for scripts that aren't files" do
stub_directory(script)
expect { cli.execute(options) }.to raise_error(
Bolt::FileError, /The script '#{script}' is not a file/
)
expect(JSON.parse(output.string)).to be
end
it "returns 2 if any node fails" do
stub_file(script)
expect(executor).to receive(:run_script)
.with(targets, script, [], kind_of(Hash))
.and_return(fail_set)
expect(cli.execute(options)).to eq(2)
end
end
context "when showing available tasks", :reset_puppet_settings do
before :each do
cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')]
cli.config.format = 'json'
end
it "lists tasks with description" do
options = {
subcommand: 'task',
action: 'show'
}
cli.execute(options)
tasks = JSON.parse(output.string)['tasks']
[
['sample', nil],
['sample::echo', nil],
['sample::no_noop', 'Task with no noop'],
['sample::noop', 'Task with noop'],
['sample::notice', nil],
['sample::params', 'Task with parameters'],
['sample::ps_noop', 'Powershell task with noop'],
['sample::stdin', nil],
['sample::winstdin', nil]
].each do |taskdoc|
expect(tasks).to include(taskdoc)
end
end
it "lists modulepath" do
options = {
subcommand: 'task',
action: 'show'
}
cli.execute(options)
modulepath = JSON.parse(output.string)['modulepath']
expect(modulepath).to include(File.join(__FILE__, '../../fixtures/modules').to_s)
end
it "does not list a private task" do
options = {
subcommand: 'task',
action: 'show'
}
cli.execute(options)
tasks = JSON.parse(output.string)['tasks']
expect(tasks).not_to include(['sample::private', 'Do not list this task'])
end
it "shows invidual private task" do
task_name = 'sample::private'
options = {
subcommand: 'task',
action: 'show',
object: task_name
}
cli.execute(options)
json = JSON.parse(output.string)
json.delete("files")
expect(json).to eq(
"name" => "sample::private",
"metadata" => { "name" => "Private Task",
"description" => "Do not list this task",
"private" => true },
"module_dir" => File.absolute_path(File.join(__dir__, "..", "fixtures", "modules", "sample"))
)
end
it "shows an individual task data" do
task_name = 'sample::params'
options = {
subcommand: 'task',
action: 'show',
object: task_name
}
cli.execute(options)
json = JSON.parse(output.string)
json.delete("files")
expect(json).to eq(
"name" => "sample::params",
"module_dir" => File.absolute_path(File.join(__dir__, "..", "fixtures", "modules", "sample")),
"metadata" => {
"anything" => true,
"description" => "Task with parameters",
"extensions" => {},
"input_method" => 'stdin',
"parameters" => {
"mandatory_string" => {
"description" => "Mandatory string parameter",
"type" => "String[1, 10]"
},
"mandatory_integer" => {
"description" => "Mandatory integer parameter",
"type" => "Integer"
},
"mandatory_boolean" => {
"description" => "Mandatory boolean parameter",
"type" => "Boolean"
},
"non_empty_string" => {
"type" => "String[1]"
},
"optional_string" => {
"description" => "Optional string parameter",
"type" => "Optional[String]"
},
"optional_integer" => {
"description" => "Optional integer parameter",
"type" => "Optional[Integer[-5,5]]"
},
"no_type" => {
"description" => "A parameter without a type"
}
},
"supports_noop" => true
}
)
end
it "does not load inventory" do
options = {
subcommand: 'task',
action: 'show'
}
expect(cli).not_to receive(:inventory)
cli.execute(options)
end
end
context "when available tasks include an error", :reset_puppet_settings do
before :each do
cli.config.modulepath = [File.join(__FILE__, '../../fixtures/invalid_mods')]
cli.config.format = 'json'
end
it "task show prints a warning but shows other valid tasks" do
options = {
subcommand: 'task',
action: 'show'
}
cli.execute(options)
json = JSON.parse(output.string)['tasks']
tasks = [
["package", "Manage and inspect the state of packages"],
["service", "Manage and inspect the state of services"]
]
tasks.each do |task|
expect(json).to include(task)
end
output = @log_output.readlines.join
expect(output).to match(/unexpected token/)
end
end
context "when the task is not in the modulepath", :reset_puppet_settings do
before :each do
cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')]
end
it "task show displays an error" do
options = {
subcommand: 'task',
action: 'show',
object: 'abcdefg'
}
expect {
cli.execute(options)
}.to raise_error(
Bolt::Error,
'Could not find a task named "abcdefg". For a list of available tasks, run "bolt task show"'
)
end
end
context "when showing available plans", :reset_puppet_settings do
before :each do
cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')]
cli.config.format = 'json'
end
it "lists plans" do
options = {
subcommand: 'plan',
action: 'show'
}
cli.execute(options)
plan_list = JSON.parse(output.string)['plans']
[
['sample'],
['sample::single_task'],
['sample::three_tasks'],
['sample::two_tasks'],
['sample::yaml']
].each do |plan|
expect(plan_list).to include(plan)
end
end
it "lists modulepath" do
options = {
subcommand: 'plan',
action: 'show'
}
cli.execute(options)
modulepath = JSON.parse(output.string)['modulepath']
expect(modulepath).to include(File.join(__FILE__, '../../fixtures/modules').to_s)
end
it "shows an individual plan data" do
plan_name = 'sample::optional_params_task'
options = {
subcommand: 'plan',
action: 'show',
object: plan_name
}
cli.execute(options)
json = JSON.parse(output.string)
expect(json).to eq(
"name" => "sample::optional_params_task",
"module_dir" => File.absolute_path(File.join(__dir__, "..", "fixtures", "modules", "sample")),
"parameters" => {
"param_mandatory" => {
"type" => "String"
},
"param_optional" => {
"type" => "Optional[String]"
},
"param_with_default_value" => {
"type" => "String",
"default_value" => nil
}
}
)
end
it "shows an individual yaml plan data" do
plan_name = 'sample::yaml'
options = {
subcommand: 'plan',
action: 'show',
object: plan_name
}
cli.execute(options)
json = JSON.parse(output.string)
expect(json).to eq(
"name" => "sample::yaml",
"module_dir" => File.absolute_path(File.join(__dir__, "..", "fixtures", "modules", "sample")),
"parameters" => {
"nodes" => {
"type" => "TargetSpec"
},
"param_optional" => {
"type" => "Optional[String]",
"default_value" => nil
},
"param_with_default_value" => {
"type" => "String",
"default_value" => nil
}
}
)
end
it "does not load inventory" do
options = {
subcommand: 'plan',
action: 'show'
}
expect(cli).not_to receive(:inventory)
cli.execute(options)
end
end
context "when available plans include an error", :reset_puppet_settings do
before :each do
cli.config.modulepath = [File.join(__FILE__, '../../fixtures/invalid_mods')]
cli.config.format = 'json'
end
it "plan show prints a warning but shows other valid plans" do
options = {
subcommand: 'plan',
action: 'show'
}
cli.execute(options)
json = JSON.parse(output.string)['plans']
expect(json).to include(["aggregate::count"],
["aggregate::nodes"],
["canary"],
["facts"],
["facts::info"],
["puppetdb_fact"],
["sample::ok"])
expect(@log_output.readlines.join).to match(/Syntax error at.*single_task.pp/m)
end
it "plan run displays an error" do
plan_name = 'sample::single_task'
plan_params = { 'nodes' => targets.map(&:host).join(',') }
options = {
nodes: [],
subcommand: 'plan',
action: 'run',
object: plan_name,
task_options: plan_params
}
expect { cli.execute(options) }.to raise_error(/^Syntax error at/)
end
end
context "when the plan is not in the modulepath", :reset_puppet_settings do
before :each do
cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')]
end
it "plan show displays an error" do
options = {
subcommand: 'plan',
action: 'show',
object: 'abcdefg'
}
expect {
cli.execute(options)
}.to raise_error(
Bolt::Error,
'Could not find a plan named "abcdefg". For a list of available plans, run "bolt plan show"'
)
end
end
context "when running a task", :reset_puppet_settings do
let(:task_name) { +'sample::echo' }
let(:task_params) { { 'message' => 'hi' } }
let(:options) {
{
targets: targets,
subcommand: 'task',
action: 'run',
object: task_name,
task_options: task_params,
params_parsed: true
}
}
let(:input_method) { nil }
let(:task_path) { +'modules/sample/tasks/echo.sh$' }
let(:task_t) { task_type(task_name, Regexp.new(task_path), input_method) }
before :each do
allow(executor).to receive(:report_bundled_content)
cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')]
end
it "runs a task given a name" do
expect(executor)
.to receive(:run_task)
.with(targets, task_t, task_params, kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
expect(cli.execute(options)).to eq(0)
expect(JSON.parse(output.string)).to be
end
it "returns 2 if any node fails" do
expect(executor)
.to receive(:run_task)
.with(targets, task_t, task_params, kind_of(Hash))
.and_return(fail_set)
expect(cli.execute(options)).to eq(2)
end
it "errors for non-existent modules" do
task_name.replace 'dne::task1'
expect { cli.execute(options) }.to raise_error(
Bolt::Error, /Could not find a task named "dne::task1"/
)
expect(JSON.parse(output.string)).to be
end
it "errors for non-existent tasks" do
task_name.replace 'sample::dne'
expect { cli.execute(options) }.to raise_error(
Bolt::Error, /Could not find a task named "sample::dne"/
)
expect(JSON.parse(output.string)).to be
end
it "raises errors from the executor" do
task_params.clear
expect(executor)
.to receive(:run_task)
.with(targets, task_t, {}, kind_of(Hash))
.and_raise("Could not connect to target")
expect { cli.execute(options) }.to raise_error(/Could not connect to target/)
end
it "runs an init task given a module name" do
task_name.replace 'sample'
task_path.replace 'modules/sample/tasks/init.sh$'
expect(executor)
.to receive(:run_task)
.with(targets, task_t, task_params, kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
cli.execute(options)
expect(JSON.parse(output.string)).to be
end
context "input_method stdin" do
let(:input_method) { 'stdin' }
it "runs a task passing input on stdin" do
task_name.replace 'sample::stdin'
task_path.replace 'modules/sample/tasks/stdin.sh$'
expect(executor)
.to receive(:run_task)
.with(targets, task_t, task_params, kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
cli.execute(options)
expect(JSON.parse(output.string)).to be
end
it "runs a powershell task passing input on stdin" do
task_name.replace 'sample::winstdin'
task_path.replace 'modules/sample/tasks/winstdin.ps1$'
expect(executor)
.to receive(:run_task)
.with(targets, task_t, task_params, kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
cli.execute(options)
expect(JSON.parse(output.string)).to be
end
end
describe 'task parameters validation' do
let(:task_name) { +'sample::params' }
let(:task_params) { {} }
let(:input_method) { +'stdin' }
let(:task_path) { %r{modules/sample/tasks/params.sh$} }
it "errors when unknown parameters are specified" do
task_params.merge!(
'foo' => 'one',
'bar' => 'two'
)
expect { cli.execute(options) }.to raise_error(
Bolt::PAL::PALError,
/Task sample::params:\n(?x:
)\s*has no parameter named 'foo'\n(?x:
)\s*has no parameter named 'bar'/
)
expect(JSON.parse(output.string)).to be
end
it "errors when required parameters are not specified" do
task_params['mandatory_string'] = 'str'
expect { cli.execute(options) }.to raise_error(
Bolt::PAL::PALError,
/Task sample::params:\n(?x:
)\s*expects a value for parameter 'mandatory_integer'\n(?x:
)\s*expects a value for parameter 'mandatory_boolean'/
)
expect(JSON.parse(output.string)).to be
end
it "errors when the specified parameter values don't match the expected data types" do
task_params.merge!(
'mandatory_string' => 'str',
'mandatory_integer' => 10,
'mandatory_boolean' => 'str',
'non_empty_string' => 'foo',
'optional_string' => 10
)
expect { cli.execute(options) }.to raise_error(
Bolt::PAL::PALError,
/Task sample::params:\n(?x:
)\s*parameter 'mandatory_boolean' expects a Boolean value, got String\n(?x:
)\s*parameter 'optional_string' expects a value of type Undef or String,(?x:
) got Integer/
)
expect(JSON.parse(output.string)).to be
end
it "errors when the specified parameter values are outside of the expected ranges" do
task_params.merge!(
'mandatory_string' => '0123456789a',
'mandatory_integer' => 10,
'mandatory_boolean' => true,
'non_empty_string' => 'foo',
'optional_integer' => 10
)
expect { cli.execute(options) }.to raise_error(
Bolt::PAL::PALError,
/Task sample::params:\n(?x:
)\s*parameter 'mandatory_string' expects a String\[1, 10\] value, got String\n(?x:
)\s*parameter 'optional_integer' expects a value of type Undef or Integer\[-5, 5\],(?x:
) got Integer\[10, 10\]/
)
expect(JSON.parse(output.string)).to be
end
it "runs the task when the specified parameters are successfully validated" do
expect(executor)
.to receive(:run_task)
.with(targets, task_t, task_params, kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
task_params.merge!(
'mandatory_string' => ' ',
'mandatory_integer' => 0,
'mandatory_boolean' => false,
'non_empty_string' => 'foo'
)
cli.execute(options)
expect(JSON.parse(output.string)).to be
end
context "using the pcp transport with invalid tasks" do
let(:task_params) {
# these are not legal parameters for the 'sample::params' task
# according to the local task definition
{
'foo' => nil,
'bar' => nil
}
}
context "when some targets don't use the PCP transport" do
it "errors as usual if the task is not available locally" do
task_name.replace 'unknown::task'
expect { cli.execute(options) }.to raise_error(
Bolt::Error, /Could not find a task named "unknown::task"/
)
expect(JSON.parse(output.string)).to be
end
it "errors as usual if invalid (according to the local task definition) parameters are specified" do
expect { cli.execute(options) }.to raise_error(
Bolt::PAL::PALError,
/Task sample::params:\n(?x:
)\s*has no parameter named 'foo'\n(?x:
)\s*has no parameter named 'bar'/
)
expect(JSON.parse(output.string)).to be
end
end
context "when all targets use the PCP transport" do
let(:target) { Bolt::Target.new('pcp://foo') }
let(:task_t) { task_type(task_name, /\A\z/, nil) }
it "runs the task even when it is not installed locally" do
task_name.replace 'unknown::task'
expect(executor)
.to receive(:run_task)
.with(targets, task_t, task_params, kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
cli.execute(options)
expect(JSON.parse(output.string)).to be
end
it "runs the task even when invalid (according to the local task definition) parameters are specified" do
expect(executor)
.to receive(:run_task)
.with(targets, task_t, task_params, kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
cli.execute(options)
expect(JSON.parse(output.string)).to be
end
end
end
end
end
context "when running a plan", :reset_puppet_settings do
let(:plan_name) { +'sample::single_task' }
let(:plan_params) { { 'nodes' => targets.map(&:host).join(',') } }
let(:options) {
{
target_args: [],
subcommand: 'plan',
action: 'run',
object: plan_name,
task_options: plan_params
}
}
let(:task_t) { task_type('sample::echo', %r{modules/sample/tasks/echo.sh$}, nil) }
before :each do
allow(executor).to receive(:report_function_call)
allow(executor).to receive(:report_bundled_content)
cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')]
end
it "uses the nodes passed using the --nodes option(s) as the 'nodes' plan parameter" do
plan_params.clear
options[:target_args] = targets.map(&:host)
expect(executor)
.to receive(:run_task)
.with(targets, task_t, { 'message' => 'hi there' }, kind_of(Hash))
.and_return(Bolt::ResultSet.new([Bolt::Result.for_task(target, 'yes', '', 0, 'some_task')]))
expect(executor).to receive(:start_plan)
expect(executor).to receive(:log_plan)
expect(executor).to receive(:finish_plan)
cli.execute(options)
expect(JSON.parse(output.string)).to eq(
[{ 'node' => 'foo',
'target' => 'foo',
'status' => 'success',
'action' => 'task',
'object' => 'some_task',
'result' => { '_output' => 'yes' } }]
)
end
it "errors when the --nodes option(s) and the 'nodes' plan parameter are both specified" do
options[:target_args] = targets.map(&:host)
expect { cli.execute(options) }.to raise_error(
/A plan's 'nodes' parameter may be specified using the --nodes option, (?x:
)but in that case it must not be specified as a separate nodes=<value> (?x:
)parameter nor included in the JSON data passed in the --params option/
)
end
it "formats results of a passing task" do
expect(executor)
.to receive(:run_task)
.with(targets, task_t, { 'message' => 'hi there' }, kind_of(Hash))
.and_return(Bolt::ResultSet.new([Bolt::Result.for_task(target, 'yes', '', 0, 'some_task')]))
expect(executor).to receive(:start_plan)
expect(executor).to receive(:log_plan)
expect(executor).to receive(:finish_plan)
cli.execute(options)
expect(JSON.parse(output.string)).to eq(
[{ 'node' => 'foo',
'target' => 'foo',
'status' => 'success',
'action' => 'task',
'object' => 'some_task',
'result' => { '_output' => 'yes' } }]
)
end
it "raises errors from the executor" do
expect(executor)
.to receive(:run_task)
.with(targets, task_t, { 'message' => 'hi there' }, kind_of(Hash))
.and_raise("Could not connect to target")
expect(executor).to receive(:start_plan)
expect(executor).to receive(:log_plan)
expect(executor).to receive(:finish_plan)
expect(cli.execute(options)).to eq(1)
expect(JSON.parse(output.string)['msg']).to match(/Could not connect to target/)
end
it "formats results of a failing task" do
expect(executor)
.to receive(:run_task)
.with(targets, task_t, { 'message' => 'hi there' }, kind_of(Hash))
.and_return(Bolt::ResultSet.new([Bolt::Result.for_task(target, 'no', '', 1, 'some_task')]))
expect(executor).to receive(:start_plan)
expect(executor).to receive(:log_plan)
expect(executor).to receive(:finish_plan)
cli.execute(options)
expect(JSON.parse(output.string)).to eq(
[
{
'node' => 'foo',
'target' => 'foo',
'status' => 'failure',
'action' => 'task',
'object' => 'some_task',
'result' => {
"_output" => "no",
"_error" => {
"msg" => "The task failed with exit code 1",
"kind" => "puppetlabs.tasks/task-error",
"details" => { "exit_code" => 1 },
"issue_code" => "TASK_ERROR"
}
}
}
]
)
end
it "errors for non-existent plans" do
plan_name.replace 'sample::dne'
expect(executor).to receive(:start_plan)
expect(executor).to receive(:finish_plan)
expect(cli.execute(options)).to eq(1)
expect(JSON.parse(output.string)['msg']).to match(/Could not find a plan named "sample::dne"/)
end
end
describe "file uploading" do
let(:source) { '/path/to/local' }
let(:dest) { '/path/to/remote' }
let(:options) {
{
targets: targets,
subcommand: 'file',
action: 'upload',
object: source,
leftovers: [dest]
}
}
it "uploads a file via scp" do
stub_file(source)
expect(executor)
.to receive(:upload_file)
.with(targets, source, dest, kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
cli.execute(options)
expect(JSON.parse(output.string)).to be
end
it "uploads a directory via scp" do
stub_directory(source)
allow(Dir).to receive(:foreach).with(source)
expect(executor)
.to receive(:upload_file)
.with(targets, source, dest, kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
cli.execute(options)
expect(JSON.parse(output.string)).to be
end
it "returns 2 if any node fails" do
stub_file(source)
expect(executor)
.to receive(:upload_file)
.with(targets, source, dest, kind_of(Hash))
.and_return(fail_set)
expect(cli.execute(options)).to eq(2)
end
it "raises if the local file doesn't exist" do
stub_non_existent_file(source)
expect { cli.execute(options) }.to raise_error(
Bolt::FileError, /The source file '#{source}' does not exist/
)
expect(JSON.parse(output.string)).to be
end
it "errors if the local file is unreadable" do
stub_unreadable_file(source)
expect { cli.execute(options) }.to raise_error(
Bolt::FileError, /The source file '#{source}' is unreadable/
)
expect(JSON.parse(output.string)).to be
end
it "errors if a file in a subdirectory is unreadable" do
child_file = File.join(source, 'afile')
stub_directory(source)
stub_unreadable_file(child_file)
allow(Dir).to receive(:foreach).with(source).and_yield('afile')
expect { cli.execute(options) }.to raise_error(
Bolt::FileError, /The source file '#{child_file}' is unreadable/
)
expect(JSON.parse(output.string)).to be
end
end
end
describe "execute with noop" do
let(:executor) { double('executor', noop: true, subscribe: nil, shutdown: nil) }
let(:cli) { Bolt::CLI.new({}) }
let(:targets) { [target] }
let(:output) { StringIO.new }
let(:bundled_content) { ['test'] }
before :each do
allow(cli).to receive(:bundled_content).and_return(bundled_content)
expect(Bolt::Executor).to receive(:new).with(Bolt::Config.default.concurrency,
anything,
true).and_return(executor)
plugins = Bolt::Plugin.new(nil, nil, nil)
allow(cli).to receive(:plugins).and_return(plugins)
outputter = Bolt::Outputter::JSON.new(false, false, false, output)
allow(cli).to receive(:outputter).and_return(outputter)
allow(executor).to receive(:report_bundled_content)
end
context "when running a task", :reset_puppet_settings do
let(:task_name) { +'sample::noop' }
let(:task_params) { { 'message' => 'hi' } }
let(:options) {
{
targets: targets,
subcommand: 'task',
action: 'run',
object: task_name,
task_options: task_params,
noop: true
}
}
let(:task_t) { task_type(task_name, %r{modules/sample/tasks/noop.sh$}, nil) }
before :each do
cli.config.modulepath = [File.join(__FILE__, '../../fixtures/modules')]
end
it "runs a task that supports noop" do
expect(executor)
.to receive(:run_task)
.with(targets, task_t, task_params.merge('_noop' => true), kind_of(Hash))
.and_return(Bolt::ResultSet.new([]))
cli.execute(options)
expect(JSON.parse(output.string)).to be
end
it "errors on a task that doesn't support noop" do
task_name.replace 'sample::no_noop'
expect(executor).not_to receive(:run_task)
expect { cli.execute(options) }.to raise_error('Task does not support noop')
end
it "errors on a task without metadata" do
task_name.replace 'sample::echo'
expect(executor).not_to receive(:run_task)
expect { cli.execute(options) }.to raise_error('Task does not support noop')
end
end
end
describe "installing a Puppetfile" do
let(:options) {
{
subcommand: 'puppetfile',
action: 'run'
}
}
let(:output) { StringIO.new }
let(:puppetfile) { Pathname.new('path/to/puppetfile') }
let(:modulepath) { [Pathname.new('path/to/modules')] }
let(:action_stub) { double('r10k_action_puppetfile_install') }
let(:cli) { Bolt::CLI.new({}) }
before :each do
allow(cli).to receive(:outputter).and_return(Bolt::Outputter::JSON.new(false, false, false, output))
allow(puppetfile).to receive(:exist?).and_return(true)
allow_any_instance_of(Bolt::PAL).to receive(:generate_types)
allow(R10K::Action::Puppetfile::Install).to receive(:new).and_return(action_stub)
end
it 'fails if the Puppetfile does not exist' do
allow(puppetfile).to receive(:exist?).and_return(false)
expect do
cli.install_puppetfile({}, puppetfile, modulepath)
end.to raise_error(Bolt::FileError, /Could not find a Puppetfile/)
end
it 'installs to the first directory of the modulepath' do
expect(R10K::Action::Puppetfile::Install).to receive(:new)
.with({ root: puppetfile.dirname.to_s, puppetfile: puppetfile.to_s, moduledir: modulepath.first.to_s }, nil)
allow(action_stub).to receive(:call).and_return(true)
cli.install_puppetfile({}, puppetfile, modulepath)
end
it 'returns 0 and prints a result if successful' do
allow(action_stub).to receive(:call).and_return(true)
expect(cli.install_puppetfile({}, puppetfile, modulepath)).to eq(0)
result = JSON.parse(output.string)
expect(result['success']).to eq(true)
expect(result['puppetfile']).to eq(puppetfile.to_s)
expect(result['moduledir']).to eq(modulepath.first.to_s)
end
it 'returns 1 and prints a result if unsuccessful' do
allow(action_stub).to receive(:call).and_return(false)
expect(cli.install_puppetfile({}, puppetfile, modulepath)).to eq(1)
result = JSON.parse(output.string)
expect(result['success']).to eq(false)
expect(result['puppetfile']).to eq(puppetfile.to_s)
expect(result['moduledir']).to eq(modulepath.first.to_s)
end
it 'propagates any r10k errors' do
allow(action_stub).to receive(:call).and_raise(R10K::Error.new('everything is terrible'))
expect do
cli.install_puppetfile({}, puppetfile, modulepath)
end.to raise_error(Bolt::PuppetfileError, /everything is terrible/)
expect(output.string).to be_empty
end
end
describe "applying Puppet code" do
let(:options) {
{
subcommand: 'apply'
}
}
let(:output) { StringIO.new }
let(:cli) { Bolt::CLI.new([]) }
before :each do
allow(cli).to receive(:outputter).and_return(Bolt::Outputter::JSON.new(false, false, false, output))
end
it 'fails if the code file does not exist' do
manifest = Tempfile.new
options[:object] = manifest.path
manifest.close
manifest.delete
expect(cli).not_to receive(:apply_manifest)
expect { cli.execute(options) }.to raise_error(Bolt::FileError)
end
end
end
describe 'configfile' do
let(:configdir) { File.join(__dir__, '..', 'fixtures', 'configs') }
let(:modulepath) { [File.expand_path('/foo/bar'), File.expand_path('/baz/qux')] }
let(:complete_config) do
{ 'modulepath' => modulepath.join(File::PATH_SEPARATOR),
'inventoryfile' => File.join(__dir__, '..', 'fixtures', 'inventory', 'empty.yml'),
'concurrency' => 14,
'compile-concurrency' => 2,
'format' => 'json',
'log' => {
'console' => {
'level' => 'warn'
},
File.join(configdir, 'debug.log') => {
'level' => 'debug',
'append' => false
}
},
'ssh' => {
'private-key' => '/bar/foo',
'host-key-check' => false,
'connect-timeout' => 4,
'run-as' => 'Fakey McFakerson'
},
'winrm' => {
'connect-timeout' => 7,
'cacert' => '/path/to/winrm-cacert',
'extensions' => ['.py', '.bat'],
'ssl' => false,
'ssl-verify' => false
},
'pcp' => {
'task-environment' => 'testenv',
'service-url' => 'http://foo.org',
'token-file' => '/path/to/token',
'cacert' => '/path/to/cacert'
} }
end
it 'reads modulepath' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --no-host-key-check])
cli.parse
expect(cli.config.modulepath).to eq(modulepath)
end
end
it 'reads concurrency' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --no-host-key-check])
cli.parse
expect(cli.config.concurrency).to eq(14)
end
end
it 'reads compile-concurrency' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --no-host-key-check])
cli.parse
expect(cli.config.compile_concurrency).to eq(2)
end
end
it 'reads format' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --no-host-key-check])
cli.parse
expect(cli.config.format).to eq('json')
end
end
it 'reads log file' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --no-host-key-check])
cli.parse
normalized_path = File.expand_path(File.join(configdir, 'debug.log'))
expect(cli.config.log).to eq(
'console' => { level: 'warn' },
"file:#{normalized_path}" => { level: 'debug', append: false }
)
end
end
it 'reads private-key for ssh' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --no-host-key-check])
cli.parse
expect(cli.config.transports[:ssh]['private-key']).to eq('/bar/foo')
end
end
it 'reads host-key-check for ssh' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo])
cli.parse
expect(cli.config.transports[:ssh]['host-key-check']).to eq(false)
end
end
it 'reads run-as for ssh' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run r --configfile #{conf.path} --nodes foo --password bar --no-host-key-check])
cli.parse
expect(cli.config.transports[:ssh]['run-as']).to eq('Fakey McFakerson')
end
end
it 'reads separate connect-timeout for ssh and winrm' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --no-host-key-check --no-ssl])
cli.parse
expect(cli.config.transports[:ssh]['connect-timeout']).to eq(4)
expect(cli.config.transports[:winrm]['connect-timeout']).to eq(7)
end
end
it 'reads ssl for winrm' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo])
cli.parse
expect(cli.config.transports[:winrm]['ssl']).to eq(false)
end
end
it 'reads ssl-verify for winrm' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo])
cli.parse
expect(cli.config.transports[:winrm]['ssl-verify']).to eq(false)
end
end
it 'reads extensions for winrm' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --no-ssl])
cli.parse
expect(cli.config.transports[:winrm]['extensions']).to eq(['.py', '.bat'])
end
end
it 'reads task environment for pcp' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo])
cli.parse
expect(cli.config.transports[:pcp]['task-environment']).to eq('testenv')
end
end
it 'reads service url for pcp' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo])
cli.parse
expect(cli.config.transports[:pcp]['service-url']).to eql('http://foo.org')
end
end
it 'reads token file for pcp' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo])
cli.parse
expect(cli.config.transports[:pcp]['token-file']).to eql('/path/to/token')
end
end
it 'reads separate cacert file for pcp and winrm' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --no-host-key-check --no-ssl])
cli.parse
expect(cli.config.transports[:pcp]['cacert']).to eql('/path/to/cacert')
expect(cli.config.transports[:winrm]['cacert']).to eql('/path/to/winrm-cacert')
end
end
it 'CLI flags override config' do
with_tempfile_containing('conf', YAML.dump(complete_config)) do |conf|
cli = Bolt::CLI.new(%W[command run uptime --configfile #{conf.path} --nodes foo --concurrency 12])
cli.parse
expect(cli.config.concurrency).to eq(12)
end
end
it 'raises an error if a config file is specified and invalid' do
cli = Bolt::CLI.new(%W[command run uptime --configfile #{File.join(configdir, 'invalid.yml')} --nodes foo])
expect {
cli.parse
}.to raise_error(Bolt::FileError, /Could not parse/)
end
end
describe 'inventoryfile' do
let(:inventorydir) { File.join(__dir__, '..', 'fixtures', 'configs') }
it 'raises an error if an inventory file is specified and invalid' do
cli = Bolt::CLI.new(%W[command run uptime --inventoryfile #{File.join(inventorydir, 'invalid.yml')} --nodes foo])
expect {
cli.parse
}.to raise_error(Bolt::Error, /Could not parse/)
end
it 'lists targets the action would run on' do
cli = Bolt::CLI.new(%w[inventory show -t localhost])
expect_any_instance_of(Bolt::Outputter::Human).to receive(:print_targets)
cli.execute(cli.parse)
end
it 'lists groups in the inventory file' do
cli = Bolt::CLI.new(%w[group show])
expect_any_instance_of(Bolt::Outputter::Human).to receive(:print_groups)
cli.execute(cli.parse)
end
context 'with BOLT_INVENTORY set' do
before(:each) { ENV['BOLT_INVENTORY'] = '---' }
after(:each) { ENV.delete('BOLT_INVENTORY') }
it 'errors when BOLT_INVENTORY is set' do
cli = Bolt::CLI.new(%W[command run id --inventoryfile #{File.join(inventorydir, 'invalid.yml')} --nodes foo])
expect {
cli.parse
}.to raise_error(Bolt::Error, /BOLT_INVENTORY is set/)
end
end
end
end
| 1 | 13,048 | Maybe we could update these to use $future and then check on stderr? that way when we deprecate stdout we can not have to delete tests. | puppetlabs-bolt | rb |
@@ -21,7 +21,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http2
public void PrepareContinuation(Http2ContinuationFrameFlags flags, int streamId)
{
- PayloadLength = MinAllowedMaxFrameSize - HeaderLength;
+ PayloadLength = (int)_maxFrameSize;
Type = Http2FrameType.CONTINUATION;
ContinuationFlags = flags;
StreamId = streamId; | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http2
{
/* https://tools.ietf.org/html/rfc7540#section-6.10
+---------------------------------------------------------------+
| Header Block Fragment (*) ...
+---------------------------------------------------------------+
*/
public partial class Http2Frame
{
public Http2ContinuationFrameFlags ContinuationFlags
{
get => (Http2ContinuationFrameFlags)Flags;
set => Flags = (byte)value;
}
public bool ContinuationEndHeaders => (ContinuationFlags & Http2ContinuationFrameFlags.END_HEADERS) == Http2ContinuationFrameFlags.END_HEADERS;
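        // Reconfigures this reusable frame object to describe a CONTINUATION frame for the given stream.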
public void PrepareContinuation(Http2ContinuationFrameFlags flags, int streamId)
{
PayloadLength = MinAllowedMaxFrameSize - HeaderLength;
Type = Http2FrameType.CONTINUATION;
ContinuationFlags = flags;
StreamId = streamId;
}
}
}
| 1 | 16,409 | Remove this since it always has to be set afterwards. | aspnet-KestrelHttpServer | .cs |
@@ -11,7 +11,7 @@ import "testing"
// bob writes a multi-block file while unmerged, no conflicts
func TestCrUnmergedWriteMultiblockFile(t *testing.T) {
test(t,
- blockSize(20), users("alice", "bob"),
+ blockSize(20), blockChangeSize(20*1024), users("alice", "bob"),
as(alice,
mkdir("a"),
), | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
// These tests all do one conflict-free operation while a user is unstaged.
package test
import "testing"
// bob writes a multi-block file while unmerged, no conflicts
func TestCrUnmergedWriteMultiblockFile(t *testing.T) {
test(t,
blockSize(20), users("alice", "bob"),
as(alice,
mkdir("a"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/foo", "hello"),
),
as(bob, noSync(),
write("a/b", ntimesString(5, "0123456789")),
write("a/b", ntimesString(10, "0123456789")),
write("a/b", ntimesString(15, "0123456789")),
reenableUpdates(),
lsdir("a/", m{"b": "FILE", "foo": "FILE"}),
read("a/b", ntimesString(15, "0123456789")),
read("a/foo", "hello"),
),
as(alice,
lsdir("a/", m{"b": "FILE", "foo": "FILE"}),
read("a/b", ntimesString(15, "0123456789")),
read("a/foo", "hello"),
),
)
}
// bob writes a multi-block file that conflicts with a file created by alice
func TestCrConflictUnmergedWriteMultiblockFile(t *testing.T) {
test(t,
blockSize(20), users("alice", "bob"),
as(alice,
mkdir("a"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", "hello"),
),
as(bob, noSync(),
write("a/b", ntimesString(15, "0123456789")),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "hello"),
read(crname("a/b", bob), ntimesString(15, "0123456789")),
),
as(alice,
lsdir("a/", m{"b$": "FILE", crnameEsc("b", bob): "FILE"}),
read("a/b", "hello"),
read(crname("a/b", bob), ntimesString(15, "0123456789")),
),
)
}
// alice writes a multi-block file that conflicts with a directory
// created by bob
func TestCrConflictMergedWriteMultiblockFile(t *testing.T) {
test(t,
blockSize(20), users("alice", "bob"),
as(alice,
mkdir("a"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/b", ntimesString(15, "0123456789")),
),
as(bob, noSync(),
write("a/b/c", "hello"),
reenableUpdates(),
lsdir("a/", m{"b$": "DIR", crnameEsc("b", alice): "FILE"}),
read("a/b/c", "hello"),
read(crname("a/b", alice), ntimesString(15, "0123456789")),
),
as(alice,
lsdir("a/", m{"b$": "DIR", crnameEsc("b", alice): "FILE"}),
read("a/b/c", "hello"),
read(crname("a/b", alice), ntimesString(15, "0123456789")),
),
)
}
// bob resurrects a file that was removed by alice
func TestCrConflictWriteToRemovedMultiblockFile(t *testing.T) {
test(t,
blockSize(20), users("alice", "bob"),
as(alice,
mkdir("a"),
write("a/b", ntimesString(15, "0123456789")),
),
as(bob,
disableUpdates(),
),
as(alice,
rm("a/b"),
),
as(bob, noSync(),
write("a/b", ntimesString(15, "9876543210")),
reenableUpdates(),
lsdir("a/", m{"b$": "FILE"}),
read("a/b", ntimesString(15, "9876543210")),
),
as(alice,
lsdir("a/", m{"b$": "FILE"}),
read("a/b", ntimesString(15, "9876543210")),
),
)
}
// bob makes a file that was removed by alice executable
func TestCrConflictSetexToRemovedMultiblockFile(t *testing.T) {
test(t,
blockSize(20), users("alice", "bob"),
as(alice,
mkdir("a"),
write("a/b", ntimesString(15, "0123456789")),
),
as(bob,
disableUpdates(),
),
as(alice,
rm("a/b"),
),
as(bob, noSync(),
setex("a/b", true),
reenableUpdates(),
lsdir("a/", m{"b$": "EXEC"}),
read("a/b", ntimesString(15, "0123456789")),
),
as(alice,
lsdir("a/", m{"b$": "EXEC"}),
read("a/b", ntimesString(15, "0123456789")),
),
)
}
// bob moves a file that was removed by alice
func TestCrConflictMoveRemovedMultiblockFile(t *testing.T) {
test(t,
blockSize(20), users("alice", "bob"),
as(alice,
mkdir("a"),
write("a/b", ntimesString(15, "0123456789")),
),
as(bob,
disableUpdates(),
),
as(alice,
rm("a/b"),
),
as(bob, noSync(),
rename("a/b", "a/c"),
reenableUpdates(),
lsdir("a/", m{"c$": "FILE"}),
read("a/c", ntimesString(15, "0123456789")),
),
as(alice,
lsdir("a/", m{"c$": "FILE"}),
read("a/c", ntimesString(15, "0123456789")),
),
)
}
// bob writes a multi-block file while unmerged and the block change
// size is small, no conflicts
func TestCrUnmergedWriteMultiblockFileWithSmallBlockChangeSize(t *testing.T) {
test(t,
blockSize(20), blockChangeSize(5), users("alice", "bob"),
as(alice,
mkdir("a"),
),
as(bob,
disableUpdates(),
),
as(alice,
write("a/foo", "hello"),
),
as(bob, noSync(),
write("a/b", ntimesString(15, "0123456789")),
reenableUpdates(),
lsdir("a/", m{"b": "FILE", "foo": "FILE"}),
read("a/b", ntimesString(15, "0123456789")),
read("a/foo", "hello"),
),
as(alice,
lsdir("a/", m{"b": "FILE", "foo": "FILE"}),
read("a/b", ntimesString(15, "0123456789")),
read("a/foo", "hello"),
),
)
}
// bob moves a multi-block file, and then deletes its parents.
func TestCrUnmergedMoveAndDeleteMultiblockFile(t *testing.T) {
test(t,
blockSize(20), users("alice", "bob"),
as(alice,
write("a/b/c/d", ntimesString(15, "0123456789")),
),
as(bob,
disableUpdates(),
),
as(alice,
write("foo", "bar"),
),
as(bob, noSync(),
rename("a/b/c/d", "a/b/c/e"),
rm("a/b/c/e"),
rmdir("a/b/c"),
rmdir("a/b"),
reenableUpdates(),
lsdir("a/", m{}),
read("foo", "bar"),
),
as(alice,
lsdir("a/", m{}),
read("foo", "bar"),
),
)
}
| 1 | 15,560 | These two tests were causing us to use too many goroutines with `-race` using the default block change size, I think due to prefetching. | keybase-kbfs | go |
@@ -25,8 +25,13 @@ class LeafNode(Node):
self.reader = reader
self.is_leaf = True
- def fetch(self, startTime, endTime):
- return self.reader.fetch(startTime, endTime)
+ def fetch(self, startTime, endTime, now=None, requestContext=None):
+ try:
+ result = self.reader.fetch(startTime, endTime, now, requestContext)
+ except TypeError:
+ result = self.reader.fetch(startTime, endTime)
+
+ return result
@property
def intervals(self): | 1 |
class Node(object):
__slots__ = ('name', 'path', 'local', 'is_leaf')
def __init__(self, path):
self.path = path
self.name = path.split('.')[-1]
self.local = True
self.is_leaf = False
def __repr__(self):
return '<%s[%x]: %s>' % (self.__class__.__name__, id(self), self.path)
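# A branch (non-leaf) node in the metric tree; it only groups children and has no reader.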
class BranchNode(Node):
pass
class LeafNode(Node):
  # 'intervals' is exposed through the property below; keeping the name out of
  # __slots__ avoids the conflict between a slot and a same-named class attribute.
  __slots__ = ('reader',)
def __init__(self, path, reader):
Node.__init__(self, path)
self.reader = reader
self.is_leaf = True
def fetch(self, startTime, endTime):
return self.reader.fetch(startTime, endTime)
@property
def intervals(self):
return self.reader.get_intervals()
def __repr__(self):
return '<LeafNode[%x]: %s (%s)>' % (id(self), self.path, self.reader)
| 1 | 11,407 | Is that `try..except` block really needed? I mean, when it could fail? | graphite-project-graphite-web | py |
@@ -101,7 +101,7 @@ module.exports = function(config) {
files: [
{ pattern: 'test/polyfills.js', watched: false },
- { pattern: config.grep || '{debug,hooks,compat,test-utils,}/test/{browser,shared}/**.test.js', watched: false }
+ { pattern: config.grep || '{debug,hooks,compat,test-utils,}/test/{browser,shared}/suspense.test.js', watched: false }
],
preprocessors: { | 1 | /*eslint no-var:0, object-shorthand:0 */
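// Environment-derived flags: the Sauce Labs browser matrix is used only for CI
// builds of master that are not pull requests; local runs use headless Chrome.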
var coverage = String(process.env.COVERAGE) === 'true',
ci = String(process.env.CI).match(/^(1|true)$/gi),
pullRequest = !String(process.env.TRAVIS_PULL_REQUEST).match(/^(0|false|undefined)$/gi),
masterBranch = String(process.env.TRAVIS_BRANCH).match(/^master$/gi),
sauceLabs = ci && !pullRequest && masterBranch,
performance = !coverage && String(process.env.PERFORMANCE) !== 'false',
webpack = require('webpack'),
path = require('path');
var sauceLabsLaunchers = {
sl_chrome: {
base: 'SauceLabs',
browserName: 'chrome',
platform: 'Windows 10'
},
sl_firefox: {
base: 'SauceLabs',
browserName: 'firefox',
platform: 'Windows 10'
},
sl_safari: {
base: 'SauceLabs',
browserName: 'Safari',
version: '11',
platform: 'OS X 10.13'
},
sl_edge: {
base: 'SauceLabs',
browserName: 'MicrosoftEdge',
platform: 'Windows 10'
},
sl_ie_11: {
base: 'SauceLabs',
browserName: 'internet explorer',
version: '11.0',
platform: 'Windows 7'
}
};
var localLaunchers = {
ChromeNoSandboxHeadless: {
base: 'Chrome',
flags: [
'--no-sandbox',
// See https://chromium.googlesource.com/chromium/src/+/lkgr/headless/README.md
'--headless',
'--disable-gpu',
// Without a remote debugging port, Google Chrome exits immediately.
'--remote-debugging-port=9333'
]
}
};
module.exports = function(config) {
config.set({
browsers: sauceLabs
? Object.keys(sauceLabsLaunchers)
: Object.keys(localLaunchers),
frameworks: ['source-map-support', 'mocha', 'chai-sinon'],
reporters: ['mocha'].concat(
coverage ? 'coverage' : [],
sauceLabs ? 'saucelabs' : []
),
coverageReporter: {
dir: path.join(__dirname, 'coverage'),
reporters: [
{ type: 'text-summary' },
{ type: 'html' },
{ type: 'lcovonly', subdir: '.', file: 'lcov.info' }
]
},
mochaReporter: {
showDiff: true
},
browserLogOptions: { terminal: true },
browserConsoleLogOptions: { terminal: true },
browserNoActivityTimeout: 5 * 60 * 1000,
		// Use only two browsers concurrently; this works better with open source Sauce Labs remote testing
concurrency: 2,
captureTimeout: 0,
sauceLabs: {
build: 'CI #' + process.env.TRAVIS_BUILD_NUMBER + ' (' + process.env.TRAVIS_BUILD_ID + ')',
tunnelIdentifier: process.env.TRAVIS_JOB_NUMBER || ('local'+require('./package.json').version),
connectLocationForSERelay: 'localhost',
connectPortForSERelay: 4445,
startConnect: false
},
customLaunchers: sauceLabs ? sauceLabsLaunchers : localLaunchers,
files: [
{ pattern: 'test/polyfills.js', watched: false },
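			// config.grep (set e.g. via a --grep CLI override, if supported by the runner setup)
			// narrows the run to a single test file; otherwise all package suites are included.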
{ pattern: config.grep || '{debug,hooks,compat,test-utils,}/test/{browser,shared}/**.test.js', watched: false }
],
preprocessors: {
'{debug,hooks,compat,test-utils,}/test/**/*': ['webpack', 'sourcemap']
},
webpack: {
output: {
filename: '[name].js'
},
mode: 'development',
devtool: 'inline-source-map',
module: {
noParse: [
/benchmark\.js$/
],
/* Transpile source and test files */
rules: [
{
enforce: 'pre',
test: /\.jsx?$/,
exclude: /node_modules/,
loader: 'babel-loader',
options: {
comments: false,
compact: true,
plugins: coverage ?
[['istanbul', {
exclude: [
// Default config
'coverage/**',
'dist/**',
'test/**',
'test{,-*}.js',
'**/*.test.js',
'**/__tests__/**',
'**/node_modules/**',
// Our custom extension
'{debug,hooks,compat,test-utils}/test/**/*'
]
}]] : []
}
}
]
},
resolve: {
// The React DevTools integration requires preact as a module
// rather than referencing source files inside the module
// directly
alias: {
'preact/compat': path.join(__dirname, './compat/src'),
'preact/hooks': path.join(__dirname, './hooks/src'),
'preact/test-utils': path.join(__dirname, './test-utils/src'),
preact: path.join(__dirname, './src')
}
},
plugins: [
new webpack.DefinePlugin({
coverage: coverage,
NODE_ENV: JSON.stringify(process.env.NODE_ENV || ''),
ENABLE_PERFORMANCE: performance,
DISABLE_FLAKEY: !!String(process.env.FLAKEY).match(/^(0|false)$/gi)
})
],
performance: {
hints: false
}
},
webpackMiddleware: {
noInfo: true,
stats: 'errors-only'
}
});
};
| 1 | 13,520 | If you only want to run a specific group of tests you can always use `.only` like `it.only()` or `describe.only()`. It may not speed up the globbing process as much as this change here :) | preactjs-preact | js |
@@ -65,7 +65,13 @@ LAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'
LAMBDA_ZIP_FILE_NAME = 'original_lambda_archive.zip'
LAMBDA_JAR_FILE_NAME = 'original_lambda_archive.jar'
-DEFAULT_BATCH_SIZE = 10
+INVALID_PARAMETER_VALUE_EXCEPTION = 'InvalidParameterValueException'
+
+BATCH_SIZE_MAP = {
+ 'kinesis': (100, 10000),
+ 'dynamodb': (100, 1000),
+ 'sqs': (10, 10)
+}
app = Flask(APP_NAME)
| 1 | import re
import os
import imp
import sys
import json
import uuid
import time
import base64
import logging
import threading
import traceback
import hashlib
import functools
from io import BytesIO
from datetime import datetime
from six.moves import cStringIO as StringIO
from six.moves.urllib.parse import urlparse
from flask import Flask, Response, jsonify, request
from localstack import config
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services import generic_proxy
from localstack.utils.aws import aws_stack, aws_responses
from localstack.services.awslambda import lambda_executors
from localstack.services.awslambda.lambda_executors import (
LAMBDA_RUNTIME_PYTHON27,
LAMBDA_RUNTIME_PYTHON36,
LAMBDA_RUNTIME_PYTHON37,
LAMBDA_RUNTIME_PYTHON38,
LAMBDA_RUNTIME_NODEJS,
LAMBDA_RUNTIME_NODEJS610,
LAMBDA_RUNTIME_NODEJS810,
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA11,
LAMBDA_RUNTIME_DOTNETCORE2,
LAMBDA_RUNTIME_DOTNETCORE21,
LAMBDA_RUNTIME_GOLANG,
LAMBDA_RUNTIME_RUBY,
LAMBDA_RUNTIME_RUBY25,
LAMBDA_RUNTIME_PROVIDED)
from localstack.utils.common import (to_str, load_file, save_file, TMP_FILES, ensure_readable,
mkdir, unzip, is_zip_file, zip_contains_jar_entries, run, short_uid,
timestamp_millis, parse_chunked_data, now_utc, safe_requests, FuncThread,
isoformat_milliseconds)
from localstack.utils.analytics import event_publisher
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched
APP_NAME = 'lambda_api'
PATH_ROOT = '/2015-03-31'
ARCHIVE_FILE_PATTERN = '%s/lambda.handler.*.jar' % config.TMP_FOLDER
LAMBDA_SCRIPT_PATTERN = '%s/lambda_script_*.py' % config.TMP_FOLDER
# List of Lambda runtime names. Keep them in this list, mainly to silence the linter
LAMBDA_RUNTIMES = [LAMBDA_RUNTIME_PYTHON27, LAMBDA_RUNTIME_PYTHON36, LAMBDA_RUNTIME_PYTHON37,
LAMBDA_RUNTIME_PYTHON38, LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21,
LAMBDA_RUNTIME_NODEJS, LAMBDA_RUNTIME_NODEJS610, LAMBDA_RUNTIME_NODEJS810,
LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11, LAMBDA_RUNTIME_RUBY, LAMBDA_RUNTIME_RUBY25]
# default timeout in seconds
LAMBDA_DEFAULT_TIMEOUT = 3
# default handler and runtime
LAMBDA_DEFAULT_HANDLER = 'handler.handler'
LAMBDA_DEFAULT_RUNTIME = LAMBDA_RUNTIME_PYTHON38
LAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'
LAMBDA_ZIP_FILE_NAME = 'original_lambda_archive.zip'
LAMBDA_JAR_FILE_NAME = 'original_lambda_archive.jar'
DEFAULT_BATCH_SIZE = 10
app = Flask(APP_NAME)
# map ARN strings to lambda function objects
arn_to_lambda = {}
# list of event source mappings for the API
event_source_mappings = []
# logger
LOG = logging.getLogger(__name__)
# mutex for access to CWD and ENV
exec_mutex = threading.Semaphore(1)
# whether to use Docker for execution
DO_USE_DOCKER = None
# start characters indicating that a lambda result should be parsed as JSON
JSON_START_CHAR_MAP = {
list: ('[',),
tuple: ('[',),
dict: ('{',),
str: ('"',),
bytes: ('"',),
bool: ('t', 'f'),
type(None): ('n',),
int: ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'),
float: ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
}
POSSIBLE_JSON_TYPES = (str, bytes)
JSON_START_TYPES = tuple(set(JSON_START_CHAR_MAP.keys()) - set(POSSIBLE_JSON_TYPES))
JSON_START_CHARS = tuple(set(functools.reduce(lambda x, y: x + y, JSON_START_CHAR_MAP.values())))
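# Illustrative note (not from the original module): these constants let the result
# handler guess whether a raw payload string should be JSON-decoded. For example:
#
#   payload = '{"status": "ok"}'
#   isinstance(payload, POSSIBLE_JSON_TYPES) and payload[0] in JSON_START_CHARS
#   # -> True, so json.loads() is attempted later in _create_response()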
# SQS listener thread settings
SQS_LISTENER_THREAD = {}
SQS_POLL_INTERVAL_SEC = 1
# lambda executor instance
LAMBDA_EXECUTOR = lambda_executors.AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, lambda_executors.DEFAULT_EXECUTOR)
# IAM policy constants
IAM_POLICY_VERSION = '2012-10-17'
POLICY_NAME_PATTERN = 'lambda_policy_%s'
# Marker name to indicate that a bucket represents the local file system. This is used for testing
# Serverless applications where we mount the Lambda code directly into the container from the host OS.
BUCKET_MARKER_LOCAL = '__local__'
class ClientError(Exception):
def __init__(self, msg, code=400):
super(ClientError, self).__init__(msg)
self.code = code
self.msg = msg
def get_response(self):
if isinstance(self.msg, Response):
return self.msg
return error_response(self.msg, self.code)
class LambdaContext(object):
def __init__(self, func_details, qualifier=None):
self.function_name = func_details.name()
self.function_version = func_details.get_qualifier_version(qualifier)
self.invoked_function_arn = func_details.arn()
if qualifier:
self.invoked_function_arn += ':' + qualifier
def get_remaining_time_in_millis(self):
# TODO implement!
return 1000 * 60
def cleanup():
global event_source_mappings, arn_to_lambda
arn_to_lambda = {}
event_source_mappings = []
LAMBDA_EXECUTOR.cleanup()
def func_arn(function_name):
return aws_stack.lambda_function_arn(function_name)
def add_function_mapping(lambda_name, lambda_handler, lambda_cwd=None):
arn = func_arn(lambda_name)
arn_to_lambda[arn].versions.get('$LATEST')['Function'] = lambda_handler
arn_to_lambda[arn].cwd = lambda_cwd
def add_event_source(function_name, source_arn, enabled, batch_size=None):
batch_size = batch_size or DEFAULT_BATCH_SIZE
mapping = {
'UUID': str(uuid.uuid4()),
'StateTransitionReason': 'User action',
'LastModified': float(time.mktime(datetime.utcnow().timetuple())),
'BatchSize': batch_size,
'State': 'Enabled' if enabled is True or enabled is None else 'Disabled',
'FunctionArn': func_arn(function_name),
'EventSourceArn': source_arn,
'LastProcessingResult': 'OK',
'StartingPosition': LAMBDA_DEFAULT_STARTING_POSITION
}
event_source_mappings.append(mapping)
return mapping
def update_event_source(uuid_value, function_name, enabled, batch_size):
for m in event_source_mappings:
if uuid_value == m['UUID']:
if function_name:
m['FunctionArn'] = func_arn(function_name)
m['BatchSize'] = batch_size
m['State'] = 'Enabled' if enabled is True else 'Disabled'
m['LastModified'] = float(time.mktime(datetime.utcnow().timetuple()))
return m
return {}
def delete_event_source(uuid_value):
for i, m in enumerate(event_source_mappings):
if uuid_value == m['UUID']:
return event_source_mappings.pop(i)
return {}
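# Sketch (hypothetical ARN and values) of how the three mapping helpers above
# compose; the HTTP handlers further below call them roughly in this way:
#
#   mapping = add_event_source('my-func', 'arn:aws:sqs:us-east-1:000000000000:my-queue', True)
#   update_event_source(mapping['UUID'], 'my-func', enabled=False, batch_size=5)
#   delete_event_source(mapping['UUID'])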
def use_docker():
global DO_USE_DOCKER
if DO_USE_DOCKER is None:
DO_USE_DOCKER = False
if 'docker' in config.LAMBDA_EXECUTOR:
try:
run('docker images', print_error=False)
DO_USE_DOCKER = True
except Exception:
pass
return DO_USE_DOCKER
def process_apigateway_invocation(func_arn, path, payload, headers={},
resource_path=None, method=None, path_params={},
query_string_params={}, request_context={}):
try:
resource_path = resource_path or path
event = {
'path': path,
'headers': dict(headers),
'pathParameters': dict(path_params),
'body': payload,
'isBase64Encoded': False,
'resource': resource_path,
'httpMethod': method,
'queryStringParameters': query_string_params,
'requestContext': request_context,
'stageVariables': {} # TODO
}
return run_lambda(event=event, context={}, func_arn=func_arn)
except Exception as e:
LOG.warning('Unable to run Lambda function on API Gateway message: %s %s' % (e, traceback.format_exc()))
def process_sns_notification(func_arn, topic_arn, subscriptionArn, message, message_attributes, subject=''):
event = {
'Records': [{
'EventSource': 'localstack:sns',
'EventVersion': '1.0',
'EventSubscriptionArn': subscriptionArn,
'Sns': {
'Type': 'Notification',
'TopicArn': topic_arn,
'Subject': subject,
'Message': message,
'Timestamp': timestamp_millis(),
'MessageAttributes': message_attributes
}
}]
}
return run_lambda(event=event, context={}, func_arn=func_arn, asynchronous=True)
def process_kinesis_records(records, stream_name):
def chunks(lst, n):
# Yield successive n-sized chunks from lst.
for i in range(0, len(lst), n):
yield lst[i:i + n]
# feed records into listening lambdas
try:
stream_arn = aws_stack.kinesis_stream_arn(stream_name)
sources = get_event_sources(source_arn=stream_arn)
for source in sources:
arn = source['FunctionArn']
for chunk in chunks(records, source['BatchSize']):
event = {
'Records': [
{
'eventID': 'shardId-000000000000:{0}'.format(rec['sequenceNumber']),
'eventSourceARN': stream_arn,
'kinesis': rec
}
for rec in chunk
]
}
run_lambda(event=event, context={}, func_arn=arn)
except Exception as e:
LOG.warning('Unable to run Lambda function on Kinesis records: %s %s' % (e, traceback.format_exc()))
def start_lambda_sqs_listener():
if SQS_LISTENER_THREAD:
return
def send_event_to_lambda(queue_arn, queue_url, lambda_arn, messages, region):
def delete_messages(result, func_arn, event, error=None, dlq_sent=None, **kwargs):
if error and not dlq_sent:
# Skip deleting messages from the queue in case of processing errors AND if
# the message has not yet been sent to a dead letter queue (DLQ).
# We'll pick them up and retry next time they become available on the queue.
return
sqs_client = aws_stack.connect_to_service('sqs')
            entries = [{'Id': str(i), 'ReceiptHandle': r['receiptHandle']} for i, r in enumerate(records)]
sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=entries)
records = []
for msg in messages:
records.append({
'body': msg['Body'],
'receiptHandle': msg['ReceiptHandle'],
'md5OfBody': msg['MD5OfBody'],
'eventSourceARN': queue_arn,
'eventSource': lambda_executors.EVENT_SOURCE_SQS,
'awsRegion': region,
'messageId': msg['MessageId'],
'attributes': msg.get('Attributes', {}),
'messageAttributes': msg.get('MessageAttributes', {}),
'md5OfMessageAttributes': msg.get('MD5OfMessageAttributes'),
'sqs': True,
})
event = {'Records': records}
# TODO implement retries, based on "RedrivePolicy.maxReceiveCount" in the queue settings
run_lambda(event=event, context={}, func_arn=lambda_arn, asynchronous=True, callback=delete_messages)
def listener_loop(*args):
while True:
try:
sources = get_event_sources(source_arn=r'.*:sqs:.*')
if not sources:
# Temporarily disable polling if no event sources are configured
# anymore. The loop will get restarted next time a message
# arrives and if an event source is configured.
SQS_LISTENER_THREAD.pop('_thread_')
return
sqs_client = aws_stack.connect_to_service('sqs')
for source in sources:
queue_arn = source['EventSourceArn']
lambda_arn = source['FunctionArn']
batch_size = max(min(source.get('BatchSize', 1), 10), 1)
try:
region_name = queue_arn.split(':')[3]
queue_url = aws_stack.sqs_queue_url_for_arn(queue_arn)
result = sqs_client.receive_message(
QueueUrl=queue_url,
MessageAttributeNames=['All'],
MaxNumberOfMessages=batch_size
)
messages = result.get('Messages')
if not messages:
continue
send_event_to_lambda(queue_arn, queue_url, lambda_arn, messages, region=region_name)
except Exception as e:
LOG.debug('Unable to poll SQS messages for queue %s: %s' % (queue_arn, e))
except Exception:
pass
finally:
time.sleep(SQS_POLL_INTERVAL_SEC)
LOG.debug('Starting SQS message polling thread for Lambda API')
SQS_LISTENER_THREAD['_thread_'] = FuncThread(listener_loop)
SQS_LISTENER_THREAD['_thread_'].start()
def process_sqs_message(queue_name, region_name=None):
# feed message into the first listening lambda (message should only get processed once)
try:
region_name = region_name or aws_stack.get_region()
queue_arn = aws_stack.sqs_queue_arn(queue_name, region_name=region_name)
sources = get_event_sources(source_arn=queue_arn)
arns = [s.get('FunctionArn') for s in sources]
LOG.debug('Found %s source mappings for event from SQS queue %s: %s' % (len(arns), queue_arn, arns))
source = (sources or [None])[0]
if not source:
return False
start_lambda_sqs_listener()
return True
except Exception as e:
LOG.warning('Unable to run Lambda function on SQS messages: %s %s' % (e, traceback.format_exc()))
def get_event_sources(func_name=None, source_arn=None):
result = []
for m in event_source_mappings:
if not func_name or (m['FunctionArn'] in [func_name, func_arn(func_name)]):
if _arn_match(mapped=m['EventSourceArn'], searched=source_arn):
result.append(m)
return result
def _arn_match(mapped, searched):
if not searched or mapped == searched:
return True
# Some types of ARNs can end with a path separated by slashes, for
# example the ARN of a DynamoDB stream is tableARN/stream/ID. It's
# a little counterintuitive that a more specific mapped ARN can
# match a less specific ARN on the event, but some integration tests
# rely on it for things like subscribing to a stream and matching an
# event labeled with the table ARN.
if re.match(r'^%s$' % searched, mapped):
return True
if mapped.startswith(searched):
suffix = mapped[len(searched):]
return suffix[0] == '/'
return False
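# A few illustrative cases for _arn_match() (made-up ARNs, not from the source):
#
#   _arn_match('arn:aws:dynamodb:us-east-1:1:table/t/stream/x', 'arn:aws:dynamodb:us-east-1:1:table/t')
#   # -> True: mapped ARN = searched ARN plus a '/'-separated suffix
#   _arn_match('arn:aws:sqs:us-east-1:1:q1', 'arn:aws:sqs:us-east-1:1:q2')
#   # -> False
#   _arn_match('arn:aws:sqs:us-east-1:1:q1', r'.*:sqs:.*')
#   # -> True: the searched value may itself be a regex (see the SQS listener above)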
def get_function_version(arn, version):
func = arn_to_lambda.get(arn)
return format_func_details(func, version=version, always_add_version=True)
def publish_new_function_version(arn):
func_details = arn_to_lambda.get(arn)
versions = func_details.versions
last_version = func_details.max_version()
versions[str(last_version + 1)] = {
'CodeSize': versions.get('$LATEST').get('CodeSize'),
'CodeSha256': versions.get('$LATEST').get('CodeSha256'),
'Function': versions.get('$LATEST').get('Function'),
'RevisionId': str(uuid.uuid4())
}
return get_function_version(arn, str(last_version + 1))
def do_list_versions(arn):
return sorted([get_function_version(arn, version) for version in
arn_to_lambda.get(arn).versions.keys()], key=lambda k: str(k.get('Version')))
def do_update_alias(arn, alias, version, description=None):
new_alias = {
'AliasArn': arn + ':' + alias,
'FunctionVersion': version,
'Name': alias,
'Description': description or '',
'RevisionId': str(uuid.uuid4())
}
arn_to_lambda.get(arn).aliases[alias] = new_alias
return new_alias
@cloudwatched('lambda')
def run_lambda(event, context, func_arn, version=None, suppress_output=False,
asynchronous=False, callback=None):
if suppress_output:
stdout_ = sys.stdout
stderr_ = sys.stderr
stream = StringIO()
sys.stdout = stream
sys.stderr = stream
try:
func_arn = aws_stack.fix_arn(func_arn)
func_details = arn_to_lambda.get(func_arn)
if not func_details:
return not_found_error(msg='The resource specified in the request does not exist.')
if not context:
context = LambdaContext(func_details, version)
result = LAMBDA_EXECUTOR.execute(func_arn, func_details, event, context=context,
version=version, asynchronous=asynchronous, callback=callback)
except Exception as e:
return error_response('Error executing Lambda function %s: %s %s' % (func_arn, e, traceback.format_exc()))
finally:
if suppress_output:
sys.stdout = stdout_
sys.stderr = stderr_
return result
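# Minimal invocation sketch (the ARN is a placeholder):
#
#   result = run_lambda(event={'key': 'value'}, context={},
#                       func_arn='arn:aws:lambda:us-east-1:000000000000:function:my-func')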
def exec_lambda_code(script, handler_function='handler', lambda_cwd=None, lambda_env=None):
if lambda_cwd or lambda_env:
exec_mutex.acquire()
if lambda_cwd:
previous_cwd = os.getcwd()
os.chdir(lambda_cwd)
sys.path = [lambda_cwd] + sys.path
if lambda_env:
previous_env = dict(os.environ)
os.environ.update(lambda_env)
# generate lambda file name
lambda_id = 'l_%s' % short_uid()
lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
save_file(lambda_file, script)
# delete temporary .py and .pyc files on exit
TMP_FILES.append(lambda_file)
TMP_FILES.append('%sc' % lambda_file)
try:
pre_sys_modules_keys = set(sys.modules.keys())
try:
handler_module = imp.load_source(lambda_id, lambda_file)
module_vars = handler_module.__dict__
finally:
# the above import can bring files for the function
# (eg settings.py) into the global namespace. subsequent
# calls can pick up file from another function, causing
# general issues.
post_sys_modules_keys = set(sys.modules.keys())
for key in post_sys_modules_keys:
if key not in pre_sys_modules_keys:
sys.modules.pop(key)
except Exception as e:
LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
raise e
finally:
if lambda_cwd or lambda_env:
if lambda_cwd:
os.chdir(previous_cwd)
sys.path.pop(0)
if lambda_env:
os.environ = previous_env
exec_mutex.release()
return module_vars[handler_function]
def get_handler_file_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
# TODO: support Java Lambdas in the future
if runtime.startswith(LAMBDA_RUNTIME_PROVIDED):
return 'bootstrap'
delimiter = '.'
if runtime.startswith(LAMBDA_RUNTIME_NODEJS):
file_ext = '.js'
elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):
file_ext = ''
elif runtime.startswith(LAMBDA_RUNTIME_DOTNETCORE2) or runtime.startswith(LAMBDA_RUNTIME_DOTNETCORE21):
file_ext = '.dll'
delimiter = ':'
elif runtime.startswith(LAMBDA_RUNTIME_RUBY):
file_ext = '.rb'
else:
handler_name = handler_name.rpartition(delimiter)[0].replace(delimiter, os.path.sep)
file_ext = '.py'
return '%s%s' % (handler_name.split(delimiter)[0], file_ext)
def get_handler_function_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
# TODO: support Java Lambdas in the future
if runtime.startswith(LAMBDA_RUNTIME_DOTNETCORE2) or runtime.startswith(LAMBDA_RUNTIME_DOTNETCORE21):
return handler_name.split(':')[-1]
else:
return handler_name.split('.')[-1]
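# Illustrative handler resolution for a few runtimes (example inputs only):
#
#   get_handler_file_from_name('index.handler', 'nodejs8.10')                   # -> 'index.js'
#   get_handler_file_from_name('handler.handler', 'python3.8')                  # -> 'handler.py'
#   get_handler_function_from_name('handler.handler', 'python3.8')              # -> 'handler'
#   get_handler_function_from_name('Assembly::Ns.Class::Fn', 'dotnetcore2.1')   # -> 'Fn'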
def error_response(msg, code=500, error_type='InternalFailure'):
LOG.warning(msg)
return aws_responses.flask_error_response(msg, code=code, error_type=error_type)
def get_zip_bytes(function_code):
"""Returns the ZIP file contents from a FunctionCode dict.
:type function_code: dict
:param function_code: https://docs.aws.amazon.com/lambda/latest/dg/API_FunctionCode.html
:returns: bytes of the Zip file.
"""
if 'S3Bucket' in function_code:
s3_client = aws_stack.connect_to_service('s3')
bytes_io = BytesIO()
try:
s3_client.download_fileobj(function_code['S3Bucket'], function_code['S3Key'], bytes_io)
zip_file_content = bytes_io.getvalue()
except Exception as e:
raise ClientError('Unable to fetch Lambda archive from S3: %s' % e, 404)
elif 'ZipFile' in function_code:
zip_file_content = function_code['ZipFile']
zip_file_content = base64.b64decode(zip_file_content)
else:
raise ClientError('No valid Lambda archive specified.')
return zip_file_content
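# The two accepted FunctionCode shapes, with made-up bucket/key names:
#
#   get_zip_bytes({'S3Bucket': 'my-bucket', 'S3Key': 'code.zip'})    # fetched via the S3 API
#   get_zip_bytes({'ZipFile': base64.b64encode(b'PK...')})           # inline, base64-encoded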
def get_java_handler(zip_file_content, main_file, func_details=None):
"""Creates a Java handler from an uploaded ZIP or JAR.
:type zip_file_content: bytes
:param zip_file_content: ZIP file bytes.
    :type func_details: LambdaFunction
    :param func_details: The Lambda function details, forwarded to the local Java executor.
:type main_file: str
:param main_file: Filepath to the uploaded ZIP or JAR file.
:returns: function or flask.Response
"""
if is_zip_file(zip_file_content):
def execute(event, context):
result = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
event, context, main_file=main_file, func_details=func_details)
return result
return execute
raise ClientError(error_response(
'Unable to extract Java Lambda handler - file is not a valid zip/jar file', 400, error_type='ValidationError'))
def set_archive_code(code, lambda_name, zip_file_content=None):
# get metadata
lambda_arn = func_arn(lambda_name)
lambda_details = arn_to_lambda[lambda_arn]
is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL
if is_local_mount and config.LAMBDA_REMOTE_DOCKER:
msg = 'Please note that Lambda mounts (bucket name "%s") cannot be used with LAMBDA_REMOTE_DOCKER=1'
raise Exception(msg % BUCKET_MARKER_LOCAL)
# Stop/remove any containers that this arn uses.
LAMBDA_EXECUTOR.cleanup(lambda_arn)
if is_local_mount:
# Mount or use a local folder lambda executors can reference
# WARNING: this means we're pointing lambda_cwd to a local path in the user's
# file system! We must ensure that there is no data loss (i.e., we must *not* add
# this folder to TMP_FILES or similar).
return code['S3Key']
# get file content
zip_file_content = zip_file_content or get_zip_bytes(code)
# Save the zip file to a temporary file that the lambda executors can reference
code_sha_256 = base64.standard_b64encode(hashlib.sha256(zip_file_content).digest())
lambda_details.get_version('$LATEST')['CodeSize'] = len(zip_file_content)
lambda_details.get_version('$LATEST')['CodeSha256'] = code_sha_256.decode('utf-8')
tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
mkdir(tmp_dir)
tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
save_file(tmp_file, zip_file_content)
TMP_FILES.append(tmp_dir)
lambda_details.cwd = tmp_dir
return tmp_dir
def set_function_code(code, lambda_name, lambda_cwd=None):
def generic_handler(event, context):
raise ClientError(('Unable to find executor for Lambda function "%s". Note that ' +
'Node.js, Golang, and .Net Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)
arn = func_arn(lambda_name)
lambda_details = arn_to_lambda[arn]
runtime = lambda_details.runtime
lambda_environment = lambda_details.envvars
handler_name = lambda_details.handler = lambda_details.handler or LAMBDA_DEFAULT_HANDLER
code_passed = code
code = code or lambda_details.code
is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL
zip_file_content = None
if code_passed:
lambda_cwd = lambda_cwd or set_archive_code(code_passed, lambda_name)
if not is_local_mount:
            # Get the ZIP file contents of the uploaded code
zip_file_content = get_zip_bytes(code_passed)
else:
lambda_cwd = lambda_cwd or lambda_details.cwd
# get local lambda working directory
tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)
if not zip_file_content:
zip_file_content = load_file(tmp_file, mode='rb')
# Set the appropriate lambda handler.
lambda_handler = generic_handler
is_java = lambda_executors.is_java_lambda(runtime)
if is_java:
# The Lambda executors for Docker subclass LambdaExecutorContainers, which
# runs Lambda in Docker by passing all *.jar files in the function working
# directory as part of the classpath. Obtain a Java handler function below.
lambda_handler = get_java_handler(zip_file_content, tmp_file, func_details=lambda_details)
if not is_local_mount:
# Lambda code must be uploaded in Zip format
if not is_zip_file(zip_file_content):
raise ClientError(
'Uploaded Lambda code for runtime ({}) is not in Zip format'.format(runtime))
# Unzipping should only be required for (1) non-Java Lambdas, or (2) zip files containing JAR files
if not is_java or zip_contains_jar_entries(zip_file_content, 'lib/'):
unzip(tmp_file, lambda_cwd)
# Obtain handler details for any non-Java Lambda function
if not is_java:
handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
handler_function = get_handler_function_from_name(handler_name, runtime=runtime)
main_file = '%s/%s' % (lambda_cwd, handler_file)
if not os.path.exists(main_file):
# Raise an error if (1) this is not a local mount lambda, or (2) we're
# running Lambdas locally (not in Docker), or (3) we're using remote Docker.
# -> We do *not* want to raise an error if we're using local mount in non-remote Docker
if not is_local_mount or not use_docker() or config.LAMBDA_REMOTE_DOCKER:
file_list = run('cd "%s"; du -d 3 .' % lambda_cwd)
config_debug = ('Config for local mount, docker, remote: "%s", "%s", "%s"' %
(is_local_mount, use_docker(), config.LAMBDA_REMOTE_DOCKER))
LOG.debug('Lambda archive content:\n%s' % file_list)
raise ClientError(error_response(
'Unable to find handler script (%s) in Lambda archive. %s' % (main_file, config_debug),
400, error_type='ValidationError'))
if runtime.startswith('python') and not use_docker():
try:
# make sure the file is actually readable, then read contents
ensure_readable(main_file)
zip_file_content = load_file(main_file, mode='rb')
# extract handler
lambda_handler = exec_lambda_code(
zip_file_content,
handler_function=handler_function,
lambda_cwd=lambda_cwd,
lambda_env=lambda_environment)
except Exception as e:
                raise ClientError('Unable to get handler function from lambda code: %s' % e)
add_function_mapping(lambda_name, lambda_handler, lambda_cwd)
return {'FunctionName': lambda_name}
def do_list_functions():
funcs = []
for f_arn, func in arn_to_lambda.items():
if type(func) != LambdaFunction:
continue
func_name = f_arn.split(':function:')[-1]
arn = func_arn(func_name)
func_details = arn_to_lambda.get(arn)
if not func_details:
# this can happen if we're accessing Lambdas from a different region (ARN mismatch)
continue
funcs.append(format_func_details(func_details))
return funcs
def format_func_details(func_details, version=None, always_add_version=False):
version = version or '$LATEST'
func_version = func_details.get_version(version)
result = {
'CodeSha256': func_version.get('CodeSha256'),
'Role': func_details.role,
'Version': version,
'FunctionArn': func_details.arn(),
'FunctionName': func_details.name(),
'CodeSize': func_version.get('CodeSize'),
'Handler': func_details.handler,
'Runtime': func_details.runtime,
'Timeout': func_details.timeout,
'Description': func_details.description,
'MemorySize': func_details.memory_size,
'LastModified': func_details.last_modified,
'TracingConfig': {'Mode': 'PassThrough'},
'RevisionId': func_version.get('RevisionId'),
'State': 'Active',
'LastUpdateStatus': 'Successful'
}
if func_details.dead_letter_config:
result['DeadLetterConfig'] = func_details.dead_letter_config
if func_details.envvars:
result['Environment'] = {
'Variables': func_details.envvars
}
if (always_add_version or version != '$LATEST') and len(result['FunctionArn'].split(':')) <= 7:
result['FunctionArn'] += ':%s' % version
return result
def forward_to_fallback_url(func_arn, data):
""" If LAMBDA_FALLBACK_URL is configured, forward the invocation of this non-existing
Lambda to the configured URL. """
if not config.LAMBDA_FALLBACK_URL:
return None
if config.LAMBDA_FALLBACK_URL.startswith('dynamodb://'):
table_name = urlparse(config.LAMBDA_FALLBACK_URL.replace('dynamodb://', 'http://')).netloc
dynamodb = aws_stack.connect_to_service('dynamodb')
item = {
'id': {'S': short_uid()},
'timestamp': {'N': str(now_utc())},
'payload': {'S': str(data)}
}
aws_stack.create_dynamodb_table(table_name, partition_key='id')
dynamodb.put_item(TableName=table_name, Item=item)
return ''
if re.match(r'^https?://.+', config.LAMBDA_FALLBACK_URL):
response = safe_requests.post(config.LAMBDA_FALLBACK_URL, data)
return response.content
raise ClientError('Unexpected value for LAMBDA_FALLBACK_URL: %s' % config.LAMBDA_FALLBACK_URL)
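# The two supported fallback formats, shown with placeholder values (these URLs
# are examples, not defaults):
#
#   LAMBDA_FALLBACK_URL=dynamodb://lambda-fallback-records   # stores each invocation as a table item
#   LAMBDA_FALLBACK_URL=http://localhost:8000/lambda         # POSTs the payload to the URL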
def get_lambda_policy(function):
iam_client = aws_stack.connect_to_service('iam')
policies = iam_client.list_policies(Scope='Local', MaxItems=500)['Policies']
docs = []
for p in policies:
        # TODO: Cache policy documents instead of running N+1 API calls here!
versions = iam_client.list_policy_versions(PolicyArn=p['Arn'])['Versions']
default_version = [v for v in versions if v.get('IsDefaultVersion')]
versions = default_version or versions
doc = versions[0]['Document']
doc = doc if isinstance(doc, dict) else json.loads(doc)
if not isinstance(doc['Statement'], list):
doc['Statement'] = [doc['Statement']]
for stmt in doc['Statement']:
stmt['Principal'] = stmt.get('Principal') or {'AWS': TEST_AWS_ACCOUNT_ID}
doc['PolicyArn'] = p['Arn']
doc['Id'] = 'default'
docs.append(doc)
policy = [d for d in docs if d['Statement'][0]['Resource'] == func_arn(function)]
return (policy or [None])[0]
def not_found_error(ref=None, msg=None):
if not msg:
msg = 'The resource you requested does not exist.'
if ref:
msg = '%s not found: %s' % ('Function' if ':function:' in ref else 'Resource', ref)
return error_response(msg, 404, error_type='ResourceNotFoundException')
# ------------
# API METHODS
# ------------
@app.before_request
def before_request():
# fix to enable chunked encoding, as this is used by some Lambda clients
transfer_encoding = request.headers.get('Transfer-Encoding', '').lower()
if transfer_encoding == 'chunked':
request.environ['wsgi.input_terminated'] = True
@app.route('%s/functions' % PATH_ROOT, methods=['POST'])
def create_function():
""" Create new function
---
operationId: 'createFunction'
parameters:
- name: 'request'
in: body
"""
arn = 'n/a'
try:
data = json.loads(to_str(request.data))
lambda_name = data['FunctionName']
event_publisher.fire_event(event_publisher.EVENT_LAMBDA_CREATE_FUNC,
payload={'n': event_publisher.get_hash(lambda_name)})
arn = func_arn(lambda_name)
if arn in arn_to_lambda:
            return error_response('Function already exists: %s' %
                lambda_name, 409, error_type='ResourceConflictException')
arn_to_lambda[arn] = func_details = LambdaFunction(arn)
func_details.versions = {'$LATEST': {'RevisionId': str(uuid.uuid4())}}
func_details.last_modified = isoformat_milliseconds(datetime.utcnow()) + '+0000'
func_details.description = data.get('Description', '')
func_details.handler = data['Handler']
func_details.runtime = data['Runtime']
func_details.envvars = data.get('Environment', {}).get('Variables', {})
func_details.tags = data.get('Tags', {})
func_details.timeout = data.get('Timeout', LAMBDA_DEFAULT_TIMEOUT)
func_details.role = data['Role']
func_details.memory_size = data.get('MemorySize')
func_details.code = data['Code']
func_details.set_dead_letter_config(data)
result = set_function_code(func_details.code, lambda_name)
if isinstance(result, Response):
del arn_to_lambda[arn]
return result
# remove content from code attribute, if present
func_details.code.pop('ZipFile', None)
# prepare result
result.update(format_func_details(func_details))
if data.get('Publish', False):
result['Version'] = publish_new_function_version(arn)['Version']
return jsonify(result or {})
except Exception as e:
arn_to_lambda.pop(arn, None)
if isinstance(e, ClientError):
return e.get_response()
return error_response('Unknown error: %s %s' % (e, traceback.format_exc()))
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['GET'])
def get_function(function):
""" Get details for a single function
---
operationId: 'getFunction'
parameters:
- name: 'request'
in: body
- name: 'function'
in: path
"""
funcs = do_list_functions()
for func in funcs:
if func['FunctionName'] == function:
result = {
'Configuration': func,
'Code': {
'Location': '%s/code' % request.url
}
}
lambda_details = arn_to_lambda.get(func['FunctionArn'])
if lambda_details.concurrency is not None:
result['Concurrency'] = lambda_details.concurrency
return jsonify(result)
return not_found_error(func_arn(function))
@app.route('%s/functions/' % PATH_ROOT, methods=['GET'])
def list_functions():
""" List functions
---
operationId: 'listFunctions'
parameters:
- name: 'request'
in: body
"""
funcs = do_list_functions()
result = {}
result['Functions'] = funcs
return jsonify(result)
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['DELETE'])
def delete_function(function):
""" Delete an existing function
---
operationId: 'deleteFunction'
parameters:
- name: 'request'
in: body
"""
arn = func_arn(function)
# Stop/remove any containers that this arn uses.
LAMBDA_EXECUTOR.cleanup(arn)
try:
arn_to_lambda.pop(arn)
except KeyError:
return not_found_error(func_arn(function))
event_publisher.fire_event(event_publisher.EVENT_LAMBDA_DELETE_FUNC,
payload={'n': event_publisher.get_hash(function)})
i = 0
while i < len(event_source_mappings):
mapping = event_source_mappings[i]
if mapping['FunctionArn'] == arn:
del event_source_mappings[i]
i -= 1
i += 1
result = {}
return jsonify(result)
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['PUT'])
def update_function_code(function):
""" Update the code of an existing function
---
operationId: 'updateFunctionCode'
parameters:
- name: 'request'
in: body
"""
    data = json.loads(to_str(request.data))
    result = set_function_code(data, function)
    if isinstance(result, Response):
        return result
    arn = func_arn(function)
    func_details = arn_to_lambda.get(arn)
    result.update(format_func_details(func_details))
    return jsonify(result or {})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['GET'])
def get_function_code(function):
""" Get the code of an existing function
---
operationId: 'getFunctionCode'
parameters:
"""
arn = func_arn(function)
lambda_cwd = arn_to_lambda[arn].cwd
tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)
return Response(load_file(tmp_file, mode='rb'),
mimetype='application/zip',
headers={'Content-Disposition': 'attachment; filename=lambda_archive.zip'})
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['GET'])
def get_function_configuration(function):
""" Get the configuration of an existing function
---
operationId: 'getFunctionConfiguration'
parameters:
"""
arn = func_arn(function)
lambda_details = arn_to_lambda.get(arn)
if not lambda_details:
return not_found_error(arn)
result = format_func_details(lambda_details)
return jsonify(result)
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['PUT'])
def update_function_configuration(function):
""" Update the configuration of an existing function
---
operationId: 'updateFunctionConfiguration'
parameters:
- name: 'request'
in: body
"""
data = json.loads(to_str(request.data))
arn = func_arn(function)
# Stop/remove any containers that this arn uses.
LAMBDA_EXECUTOR.cleanup(arn)
lambda_details = arn_to_lambda.get(arn)
if not lambda_details:
return error_response('Unable to find Lambda function ARN "%s"' % arn,
404, error_type='ResourceNotFoundException')
if data.get('Handler'):
lambda_details.handler = data['Handler']
if data.get('Runtime'):
lambda_details.runtime = data['Runtime']
lambda_details.set_dead_letter_config(data)
env_vars = data.get('Environment', {}).get('Variables')
if env_vars is not None:
lambda_details.envvars = env_vars
if data.get('Timeout'):
lambda_details.timeout = data['Timeout']
return jsonify(data)
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['POST'])
def add_permission(function):
data = json.loads(to_str(request.data))
iam_client = aws_stack.connect_to_service('iam')
sid = data.get('StatementId')
policy = {
'Version': IAM_POLICY_VERSION,
'Id': 'LambdaFuncAccess-%s' % sid,
'Statement': [{
'Sid': sid,
'Effect': 'Allow',
# TODO: 'Principal' in policies not yet supported in upstream moto
# 'Principal': data.get('Principal') or {'AWS': TEST_AWS_ACCOUNT_ID},
'Action': data.get('Action'),
'Resource': func_arn(function)
}]
}
iam_client.create_policy(PolicyName=POLICY_NAME_PATTERN % function,
PolicyDocument=json.dumps(policy), Description='Policy for Lambda function "%s"' % function)
result = {'Statement': sid}
return jsonify(result)
@app.route('%s/functions/<function>/policy/<statement>' % PATH_ROOT, methods=['DELETE'])
def remove_permission(function, statement):
qualifier = request.args.get('Qualifier')
iam_client = aws_stack.connect_to_service('iam')
policy = get_lambda_policy(function)
if not policy:
return error_response('Unable to find policy for Lambda function "%s"' % function,
404, error_type='ResourceNotFoundException')
iam_client.delete_policy(PolicyArn=policy['PolicyArn'])
result = {
'FunctionName': function,
'Qualifier': qualifier,
'StatementId': policy['Statement'][0]['Sid'],
}
return jsonify(result)
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['GET'])
def get_policy(function):
policy = get_lambda_policy(function)
if not policy:
return error_response('The resource you requested does not exist.',
404, error_type='ResourceNotFoundException')
return jsonify({'Policy': json.dumps(policy), 'RevisionId': 'test1234'})
@app.route('%s/functions/<function>/invocations' % PATH_ROOT, methods=['POST'])
def invoke_function(function):
""" Invoke an existing function
---
operationId: 'invokeFunction'
parameters:
- name: 'request'
in: body
"""
# function here can either be an arn or a function name
arn = func_arn(function)
# arn can also contain a qualifier, extract it from there if so
m = re.match('(arn:aws:lambda:.*:.*:function:[a-zA-Z0-9-_]+)(:.*)?', arn)
if m and m.group(2):
qualifier = m.group(2)[1:]
arn = m.group(1)
else:
qualifier = request.args.get('Qualifier')
data = request.get_data()
if data:
data = to_str(data)
try:
data = json.loads(data)
except Exception:
try:
# try to read chunked content
data = json.loads(parse_chunked_data(data))
except Exception:
return error_response('The payload is not JSON: %s' % data, 415,
error_type='UnsupportedMediaTypeException')
# Default invocation type is RequestResponse
invocation_type = request.environ.get('HTTP_X_AMZ_INVOCATION_TYPE', 'RequestResponse')
def _create_response(result, status_code=200):
""" Create the final response for the given invocation result """
if isinstance(result, Response):
return result
details = {
'StatusCode': status_code,
'Payload': result,
'Headers': {}
}
if isinstance(result, dict):
for key in ('StatusCode', 'Payload', 'FunctionError'):
if result.get(key):
details[key] = result[key]
        # Try to parse the payload as JSON
was_json = False
payload = details['Payload']
if payload and isinstance(payload, POSSIBLE_JSON_TYPES) and payload[0] in JSON_START_CHARS:
try:
details['Payload'] = json.loads(details['Payload'])
was_json = True
except Exception:
pass
# Set error headers
if details.get('FunctionError'):
details['Headers']['X-Amz-Function-Error'] = str(details['FunctionError'])
# Construct response object
response_obj = details['Payload']
if was_json or isinstance(response_obj, JSON_START_TYPES):
response_obj = jsonify(response_obj)
details['Headers']['Content-Type'] = 'application/json'
else:
response_obj = str(response_obj)
details['Headers']['Content-Type'] = 'text/plain'
return response_obj, details['StatusCode'], details['Headers']
# check if this lambda function exists
not_found = None
if arn not in arn_to_lambda:
not_found = not_found_error(arn)
elif qualifier and not arn_to_lambda.get(arn).qualifier_exists(qualifier):
not_found = not_found_error('{0}:{1}'.format(arn, qualifier))
if not_found:
        forward_result = forward_to_fallback_url(arn, data)
if forward_result is not None:
return _create_response(forward_result)
return not_found
if invocation_type == 'RequestResponse':
result = run_lambda(asynchronous=False, func_arn=arn, event=data, context={}, version=qualifier)
return _create_response(result)
elif invocation_type == 'Event':
run_lambda(asynchronous=True, func_arn=arn, event=data, context={}, version=qualifier)
return _create_response('', status_code=202)
elif invocation_type == 'DryRun':
# Assume the dry run always passes.
return _create_response('', status_code=204)
return error_response('Invocation type not one of: RequestResponse, Event or DryRun',
code=400, error_type='InvalidParameterValueException')
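# Invocation-type summary (mirrors the branches above):
#   RequestResponse -> synchronous; returns the function's payload (HTTP 200)
#   Event           -> asynchronous; empty body (HTTP 202)
#   DryRun          -> validation only; empty body (HTTP 204)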
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['GET'])
def list_event_source_mappings():
""" List event source mappings
---
operationId: 'listEventSourceMappings'
"""
event_source_arn = request.args.get('EventSourceArn')
function_name = request.args.get('FunctionName')
mappings = event_source_mappings
if event_source_arn:
mappings = [m for m in mappings if event_source_arn == m.get('EventSourceArn')]
if function_name:
function_arn = func_arn(function_name)
mappings = [m for m in mappings if function_arn == m.get('FunctionArn')]
response = {
'EventSourceMappings': mappings
}
return jsonify(response)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['GET'])
def get_event_source_mapping(mapping_uuid):
""" Get an existing event source mapping
---
operationId: 'getEventSourceMapping'
parameters:
- name: 'request'
in: body
"""
mappings = event_source_mappings
mappings = [m for m in mappings if mapping_uuid == m.get('UUID')]
if len(mappings) == 0:
return not_found_error()
return jsonify(mappings[0])
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['POST'])
def create_event_source_mapping():
""" Create new event source mapping
---
operationId: 'createEventSourceMapping'
parameters:
- name: 'request'
in: body
"""
data = json.loads(to_str(request.data))
mapping = add_event_source(
data['FunctionName'], data['EventSourceArn'], data.get('Enabled'), data.get('BatchSize')
)
return jsonify(mapping)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['PUT'])
def update_event_source_mapping(mapping_uuid):
""" Update an existing event source mapping
---
operationId: 'updateEventSourceMapping'
parameters:
- name: 'request'
in: body
"""
data = json.loads(request.data)
if not mapping_uuid:
return jsonify({})
function_name = data.get('FunctionName') or ''
enabled = data.get('Enabled', True)
batch_size = data.get('BatchSize') or 100
mapping = update_event_source(mapping_uuid, function_name, enabled, batch_size)
return jsonify(mapping)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['DELETE'])
def delete_event_source_mapping(mapping_uuid):
""" Delete an event source mapping
---
operationId: 'deleteEventSourceMapping'
"""
if not mapping_uuid:
return jsonify({})
mapping = delete_event_source(mapping_uuid)
return jsonify(mapping)
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['POST'])
def publish_version(function):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
return jsonify(publish_new_function_version(arn))
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['GET'])
def list_versions(function):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
return jsonify({'Versions': do_list_versions(arn)})
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['POST'])
def create_alias(function):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
data = json.loads(request.data)
alias = data.get('Name')
if alias in arn_to_lambda.get(arn).aliases:
        return error_response('Alias already exists: %s' % (arn + ':' + alias), 409,
            error_type='ResourceConflictException')
version = data.get('FunctionVersion')
description = data.get('Description')
return jsonify(do_update_alias(arn, alias, version, description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['PUT'])
def update_alias(function, name):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
if name not in arn_to_lambda.get(arn).aliases:
return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
current_alias = arn_to_lambda.get(arn).aliases.get(name)
data = json.loads(request.data)
version = data.get('FunctionVersion') or current_alias.get('FunctionVersion')
description = data.get('Description') or current_alias.get('Description')
return jsonify(do_update_alias(arn, name, version, description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['GET'])
def get_alias(function, name):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
if name not in arn_to_lambda.get(arn).aliases:
return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
return jsonify(arn_to_lambda.get(arn).aliases.get(name))
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['GET'])
def list_aliases(function):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
return jsonify({'Aliases': sorted(arn_to_lambda.get(arn).aliases.values(),
key=lambda x: x['Name'])})
@app.route('/<version>/functions/<function>/concurrency', methods=['PUT'])
def put_concurrency(version, function):
# the version for put_concurrency != PATH_ROOT, at the time of this
# writing it's: /2017-10-31 for this endpoint
# https://docs.aws.amazon.com/lambda/latest/dg/API_PutFunctionConcurrency.html
arn = func_arn(function)
data = json.loads(request.data)
lambda_details = arn_to_lambda.get(arn)
if not lambda_details:
return not_found_error(arn)
lambda_details.concurrency = data
return jsonify(data)
@app.route('/<version>/tags/<arn>', methods=['GET'])
def list_tags(version, arn):
func_details = arn_to_lambda.get(arn)
if not func_details:
return not_found_error(arn)
result = {'Tags': func_details.tags}
return jsonify(result)
@app.route('/<version>/tags/<arn>', methods=['POST'])
def tag_resource(version, arn):
data = json.loads(request.data)
tags = data.get('Tags', {})
if tags:
func_details = arn_to_lambda.get(arn)
if not func_details:
return not_found_error(arn)
if func_details:
func_details.tags.update(tags)
return jsonify({})
@app.route('/<version>/tags/<arn>', methods=['DELETE'])
def untag_resource(version, arn):
tag_keys = request.args.getlist('tagKeys')
func_details = arn_to_lambda.get(arn)
if not func_details:
return not_found_error(arn)
for tag_key in tag_keys:
func_details.tags.pop(tag_key, None)
return jsonify({})
@app.route('/2019-09-25/functions/<function>/event-invoke-config', methods=['PUT'])
def put_function_event_invoke_config(function):
""" Updates the configuration for asynchronous invocation for a function
---
operationId: PutFunctionEventInvokeConfig
parameters:
- name: 'function'
in: path
- name: 'qualifier'
in: path
- name: 'request'
in: body
"""
data = json.loads(to_str(request.data))
function_arn = func_arn(function)
lambda_obj = arn_to_lambda[function_arn]
response = lambda_obj.put_function_event_invoke_config(data)
return jsonify({
'LastModified': response.last_modified,
'FunctionArn': str(function_arn),
'MaximumRetryAttempts': response.max_retry_attempts,
'MaximumEventAgeInSeconds': response.max_event_age,
'DestinationConfig': {
'OnSuccess': {
'Destination': str(response.on_successful_invocation)
},
'OnFailure': {
'Destination': str(response.dead_letter_config)
}
}
})
@app.route('/2019-09-25/functions/<function>/event-invoke-config', methods=['GET'])
def get_function_event_invoke_config(function):
""" Retrieves the configuration for asynchronous invocation for a function
---
operationId: GetFunctionEventInvokeConfig
parameters:
- name: 'function'
in: path
- name: 'qualifier'
in: path
- name: 'request'
in: body
"""
try:
function_arn = func_arn(function)
lambda_obj = arn_to_lambda[function_arn]
except Exception as e:
return error_response(str(e), 400)
response = lambda_obj.get_function_event_invoke_config()
return jsonify(response)
def serve(port, quiet=True):
# initialize the Lambda executor
LAMBDA_EXECUTOR.startup()
generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)
| 1 | 10,812 | nit: I'd probably rename this to `BATCH_SIZE_RANGES`, to use a slightly more descriptive name. | localstack-localstack | py |
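For illustration, the rename suggested in the review above would look like this (a sketch based on the patch hunk, assuming the tuples mean (default, maximum) batch size):

BATCH_SIZE_RANGES = {
    'kinesis': (100, 10000),
    'dynamodb': (100, 1000),
    'sqs': (10, 10)
}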
@@ -1322,7 +1322,8 @@ public class MenuEntrySwapperPlugin extends Plugin
if (this.swapCoalBag)
{
- menuManager.addPriorityEntry("Empty", "Coal bag");
+ menuManager.addPriorityEntry("Empty", "Coal bag").setPriority(90);
+ menuManager.addPriorityEntry("Fill", "Coal bag").setPriority(100);
}
if (this.swapBones) | 1 | /*
* Copyright (c) 2018, Adam <[email protected]>
* Copyright (c) 2018, Kamiel
* Copyright (c) 2019, alanbaumgartner <https://github.com/alanbaumgartner>
* Copyright (c) 2019, Kyle <https://github.com/kyleeld>
* Copyright (c) 2019, lucouswin <https://github.com/lucouswin>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.menuentryswapper;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.inject.Provides;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import net.runelite.api.Client;
import net.runelite.api.GameState;
import net.runelite.api.InventoryID;
import net.runelite.api.Item;
import net.runelite.api.ItemDefinition;
import net.runelite.api.MenuAction;
import static net.runelite.api.MenuAction.MENU_ACTION_DEPRIORITIZE_OFFSET;
import static net.runelite.api.MenuAction.WALK;
import net.runelite.api.MenuEntry;
import net.runelite.api.NPC;
import net.runelite.api.Player;
import net.runelite.api.Varbits;
import static net.runelite.api.Varbits.BUILDING_MODE;
import net.runelite.api.WorldType;
import net.runelite.api.coords.WorldPoint;
import net.runelite.api.events.ConfigChanged;
import net.runelite.api.events.FocusChanged;
import net.runelite.api.events.GameStateChanged;
import net.runelite.api.events.MenuEntryAdded;
import net.runelite.api.events.MenuOpened;
import net.runelite.api.events.MenuOptionClicked;
import net.runelite.api.events.PostItemDefinition;
import net.runelite.api.events.VarbitChanged;
import net.runelite.api.events.WidgetMenuOptionClicked;
import net.runelite.api.widgets.WidgetInfo;
import net.runelite.client.callback.ClientThread;
import net.runelite.client.config.ConfigManager;
import net.runelite.client.eventbus.EventBus;
import net.runelite.client.game.ItemManager;
import net.runelite.client.game.ItemVariationMapping;
import net.runelite.client.input.KeyManager;
import net.runelite.client.menus.AbstractComparableEntry;
import static net.runelite.client.menus.ComparableEntries.newBankComparableEntry;
import static net.runelite.client.menus.ComparableEntries.newBaseComparableEntry;
import net.runelite.client.menus.MenuManager;
import net.runelite.client.menus.WidgetMenuOption;
import net.runelite.client.plugins.Plugin;
import net.runelite.client.plugins.PluginDependency;
import net.runelite.client.plugins.PluginDescriptor;
import net.runelite.client.plugins.PluginManager;
import net.runelite.client.plugins.PluginType;
import net.runelite.client.plugins.menuentryswapper.util.BurningAmuletMode;
import net.runelite.client.plugins.menuentryswapper.util.CharterOption;
import net.runelite.client.plugins.menuentryswapper.util.CombatBraceletMode;
import net.runelite.client.plugins.menuentryswapper.util.ConstructionCapeMode;
import net.runelite.client.plugins.menuentryswapper.util.DigsitePendantMode;
import net.runelite.client.plugins.menuentryswapper.util.DuelingRingMode;
import net.runelite.client.plugins.menuentryswapper.util.FairyRingMode;
import net.runelite.client.plugins.menuentryswapper.util.GamesNecklaceMode;
import net.runelite.client.plugins.menuentryswapper.util.GloryMode;
import net.runelite.client.plugins.menuentryswapper.util.HouseMode;
import net.runelite.client.plugins.menuentryswapper.util.MaxCapeMode;
import net.runelite.client.plugins.menuentryswapper.util.NecklaceOfPassageMode;
import net.runelite.client.plugins.menuentryswapper.util.ObeliskMode;
import net.runelite.client.plugins.menuentryswapper.util.OccultAltarMode;
import net.runelite.client.plugins.menuentryswapper.util.QuestCapeMode;
import net.runelite.client.plugins.menuentryswapper.util.RingOfWealthMode;
import net.runelite.client.plugins.menuentryswapper.util.SkillsNecklaceMode;
import net.runelite.client.plugins.menuentryswapper.util.SlayerRingMode;
import net.runelite.client.plugins.menuentryswapper.util.XericsTalismanMode;
import net.runelite.client.plugins.pvptools.PvpToolsConfig;
import net.runelite.client.plugins.pvptools.PvpToolsPlugin;
import static net.runelite.client.util.MenuUtil.swap;
import net.runelite.client.util.MiscUtils;
import net.runelite.client.util.Text;
import org.apache.commons.lang3.ArrayUtils;
@PluginDescriptor(
name = "Menu Entry Swapper",
description = "Change the default option that is displayed when hovering over objects",
tags = {"npcs", "inventory", "items", "objects"},
type = PluginType.UTILITY,
enabledByDefault = false
)
@Singleton
@PluginDependency(PvpToolsPlugin.class)
public class MenuEntrySwapperPlugin extends Plugin
{
private static final String CONFIGURE = "Configure";
private static final String SAVE = "Save";
private static final String RESET = "Reset";
private static final String MENU_TARGET = "Shift-click";
private static final String CONFIG_GROUP = "shiftclick";
private static final String ITEM_KEY_PREFIX = "item_";
private static final int PURO_PURO_REGION_ID = 10307;
private static final WidgetMenuOption FIXED_INVENTORY_TAB_CONFIGURE = new WidgetMenuOption(CONFIGURE,
MENU_TARGET, WidgetInfo.FIXED_VIEWPORT_INVENTORY_TAB);
private static final WidgetMenuOption FIXED_INVENTORY_TAB_SAVE = new WidgetMenuOption(SAVE,
MENU_TARGET, WidgetInfo.FIXED_VIEWPORT_INVENTORY_TAB);
private static final WidgetMenuOption RESIZABLE_INVENTORY_TAB_CONFIGURE = new WidgetMenuOption(CONFIGURE,
MENU_TARGET, WidgetInfo.RESIZABLE_VIEWPORT_INVENTORY_TAB);
private static final WidgetMenuOption RESIZABLE_INVENTORY_TAB_SAVE = new WidgetMenuOption(SAVE,
MENU_TARGET, WidgetInfo.RESIZABLE_VIEWPORT_INVENTORY_TAB);
private static final WidgetMenuOption RESIZABLE_BOTTOM_LINE_INVENTORY_TAB_CONFIGURE = new WidgetMenuOption(CONFIGURE,
MENU_TARGET, WidgetInfo.RESIZABLE_VIEWPORT_BOTTOM_LINE_INVENTORY_TAB);
private static final WidgetMenuOption RESIZABLE_BOTTOM_LINE_INVENTORY_TAB_SAVE = new WidgetMenuOption(SAVE,
MENU_TARGET, WidgetInfo.RESIZABLE_VIEWPORT_BOTTOM_LINE_INVENTORY_TAB);
private static final Set<MenuAction> NPC_MENU_TYPES = ImmutableSet.of(
MenuAction.NPC_FIRST_OPTION, MenuAction.NPC_SECOND_OPTION, MenuAction.NPC_THIRD_OPTION,
MenuAction.NPC_FOURTH_OPTION, MenuAction.NPC_FIFTH_OPTION, MenuAction.EXAMINE_NPC
);
private static final Splitter NEWLINE_SPLITTER = Splitter
.on("\n")
.omitEmptyStrings()
.trimResults();
@Inject
private Client client;
@Inject
private ClientThread clientThread;
@Inject
private MenuEntrySwapperConfig config;
@Inject
private ShiftClickInputListener inputListener;
@Inject
private ConfigManager configManager;
@Inject
private PluginManager pluginManager;
@Inject
private KeyManager keyManager;
@Inject
private MenuManager menuManager;
@Inject
private ItemManager itemManager;
@Inject
private EventBus eventBus;
@Inject
private PvpToolsPlugin pvpTools;
@Inject
private PvpToolsConfig pvpToolsConfig;
private MenuEntry[] entries;
private final Set<String> leftClickConstructionItems = new HashSet<>();
private boolean buildingMode;
private boolean inTobRaid = false;
private boolean inCoxRaid = false;
private final Map<AbstractComparableEntry, AbstractComparableEntry> customSwaps = new HashMap<>();
private List<String> bankItemNames = new ArrayList<>();
@Getter(AccessLevel.PACKAGE)
private boolean configuringShiftClick = false;
@Setter(AccessLevel.PACKAGE)
private boolean shiftModifier = false;
private boolean getWithdrawOne;
private String getWithdrawOneItems;
private boolean getWithdrawFive;
private String getWithdrawFiveItems;
private boolean getWithdrawTen;
private String getWithdrawTenItems;
private boolean getWithdrawX;
private String getWithdrawXAmount;
private String getWithdrawXItems;
private boolean getWithdrawAll;
private String getWithdrawAllItems;
private boolean swapMax;
private MaxCapeMode maxMode;
private boolean getSwapArdougneCape;
private boolean getSwapConstructionCape;
private ConstructionCapeMode constructionCapeMode;
private boolean getSwapCraftingCape;
private boolean getSwapMagicCape;
private boolean getSwapExplorersRing;
private boolean swapAdmire;
private boolean swapQuestCape;
private QuestCapeMode questCapeMode;
private String configCustomSwaps;
private boolean shiftClickCustomization;
private boolean swapCoalBag;
private boolean swapBirdhouseEmpty;
private boolean swapBones;
private boolean swapChase;
private boolean swapHarpoon;
private OccultAltarMode swapOccultMode;
private HouseMode swapHomePortalMode;
private boolean swapPrivate;
private boolean swapPick;
private boolean swapQuick;
private boolean swapBoxTrap;
private boolean rockCake;
private boolean swapRogueschests;
private boolean swapClimbUpDown;
private boolean swapStun;
private boolean swapSearch;
private boolean swapHardWoodGrove;
private boolean getRemoveObjects;
private String getRemovedObjects;
private boolean swapImps;
private CharterOption charterOption;
private boolean getSwapBuyOne;
private String getBuyOneItems;
private boolean getSwapBuyFive;
private String getBuyFiveItems;
private boolean getSwapBuyTen;
private String getBuyTenItems;
private boolean getSwapBuyFifty;
private String getBuyFiftyItems;
private boolean getSwapSellOne;
private String getSellOneItems;
private boolean getSwapSellFive;
private String getSellFiveItems;
private boolean getSwapSellTen;
private String getSellTenItems;
private boolean getSwapSellFifty;
private String getSellFiftyItems;
private boolean getEasyConstruction;
private String getEasyConstructionItems;
private boolean getSwapTanning;
private boolean getSwapSawmill;
private boolean getSwapSawmillPlanks;
private boolean getSwapPuro;
private boolean swapAssignment;
private boolean swapBankExchange;
private boolean swapContract;
private boolean swapInteract;
private boolean swapPickpocket;
private boolean swapPay;
private boolean swapAbyssTeleport;
private boolean swapTrade;
private boolean swapTravel;
private boolean swapMinigame;
private boolean swapPlank;
private boolean swapMetamorphosis;
private boolean swapEnchant;
private FairyRingMode swapFairyRingMode;
private ObeliskMode swapObeliskMode;
private boolean swapTeleportItem;
private boolean swapWildernessLever;
private boolean swapNexus;
private boolean getGamesNecklace;
private GamesNecklaceMode getGamesNecklaceMode;
private boolean getDuelingRing;
private DuelingRingMode getDuelingRingMode;
private boolean getGlory;
private GloryMode getGloryMode;
private boolean getSkillsNecklace;
private SkillsNecklaceMode getSkillsNecklaceMode;
private boolean getNecklaceofPassage;
private NecklaceOfPassageMode getNecklaceofPassageMode;
private boolean getDigsitePendant;
private DigsitePendantMode getDigsitePendantMode;
private boolean getCombatBracelet;
private CombatBraceletMode getCombatBraceletMode;
private boolean getBurningAmulet;
private BurningAmuletMode getBurningAmuletMode;
private boolean getXericsTalisman;
private XericsTalismanMode getXericsTalismanMode;
private boolean getRingofWealth;
private RingOfWealthMode getRingofWealthMode;
private boolean getSlayerRing;
private SlayerRingMode getSlayerRingMode;
private boolean hideExamine;
private boolean hideTradeWith;
private boolean hideReport;
private boolean hideLookup;
private boolean hideNet;
private boolean hideBait;
private boolean hideDestroyRunepouch;
private boolean hideDestroyCoalbag;
private boolean hideDestroyHerbsack;
private boolean hideDestroyBoltpouch;
private boolean hideDestroyGembag;
private boolean hideDropRunecraftingPouch;
private boolean hideCastToB;
private Set<String> hideCastIgnoredToB;
private boolean hideCastCoX;
private Set<String> hideCastIgnoredCoX;
@Provides
MenuEntrySwapperConfig provideConfig(ConfigManager configManager)
{
return configManager.getConfig(MenuEntrySwapperConfig.class);
}
@Override
public void startUp()
{
updateConfig();
addSubscriptions();
addSwaps();
loadConstructionItems(config.getEasyConstructionItems());
if (config.shiftClickCustomization())
{
enableCustomization();
}
loadCustomSwaps(config.customSwaps());
if (client.getGameState() == GameState.LOGGED_IN)
{
setCastOptions(true);
}
}
@Override
public void shutDown()
{
eventBus.unregister(this);
disableCustomization();
loadConstructionItems("");
loadCustomSwaps(""); // Removes all custom swaps
removeSwaps();
if (client.getGameState() == GameState.LOGGED_IN)
{
resetCastOptions();
}
}
private void addSubscriptions()
{
eventBus.subscribe(ConfigChanged.class, this, this::onConfigChanged);
eventBus.subscribe(WidgetMenuOptionClicked.class, this, this::onWidgetMenuOptionClicked);
eventBus.subscribe(GameStateChanged.class, this, this::onGameStateChanged);
eventBus.subscribe(VarbitChanged.class, this, this::onVarbitChanged);
eventBus.subscribe(MenuOpened.class, this, this::onMenuOpened);
eventBus.subscribe(MenuOptionClicked.class, this, this::onMenuOptionClicked);
eventBus.subscribe(MenuEntryAdded.class, this, this::onMenuEntryAdded);
eventBus.subscribe(PostItemDefinition.class, this, this::onPostItemDefinition);
eventBus.subscribe(FocusChanged.class, this, this::onFocusChanged);
}
private void onConfigChanged(ConfigChanged event)
{
if (!"menuentryswapper".equals(event.getGroup()))
{
return;
}
removeSwaps();
updateConfig();
addSwaps();
loadConstructionItems(this.getEasyConstructionItems);
if (event.getKey().equals("customSwaps"))
{
loadCustomSwaps(this.configCustomSwaps);
}
else if (event.getKey().equals("shiftClickCustomization"))
{
if (this.shiftClickCustomization)
{
enableCustomization();
}
else
{
disableCustomization();
}
}
else if (event.getKey().startsWith(ITEM_KEY_PREFIX))
{
clientThread.invoke(this::resetItemDefinitionCache);
}
else if ((event.getKey().equals("hideCastToB") || event.getKey().equals("hideCastIgnoredToB")))
{
if (this.hideCastToB)
{
setCastOptions(true);
}
else
{
resetCastOptions();
}
}
else if ((event.getKey().equals("hideCastCoX") || event.getKey().equals("hideCastIgnoredCoX")))
{
if (this.hideCastCoX)
{
setCastOptions(true);
}
else
{
resetCastOptions();
}
}
}
private void resetItemDefinitionCache()
{
itemManager.invalidateItemDefinitionCache();
client.getItemDefinitionCache().reset();
}
private Integer getSwapConfig(int itemId)
{
itemId = ItemVariationMapping.map(itemId);
String config = configManager.getConfiguration(CONFIG_GROUP, ITEM_KEY_PREFIX + itemId);
if (config == null || config.isEmpty())
{
return null;
}
return Integer.parseInt(config);
}
private void setSwapConfig(int itemId, int index)
{
itemId = ItemVariationMapping.map(itemId);
configManager.setConfiguration(CONFIG_GROUP, ITEM_KEY_PREFIX + itemId, index);
}
private void unsetSwapConfig(int itemId)
{
itemId = ItemVariationMapping.map(itemId);
configManager.unsetConfiguration(CONFIG_GROUP, ITEM_KEY_PREFIX + itemId);
}
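// The three helpers above persist shift-click overrides as one config entry
// per base item id: ItemVariationMapping collapses item variants onto a
// single base id, and the stored value is an inventory-action index. For
// example, a hypothetical entry in group "menuentryswapper" with key
// ITEM_KEY_PREFIX + 4151 and value "2" would swap that item's shift-click
// to its third inventory action (the id and value are illustrative only,
// not taken from any saved configuration).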
private void enableCustomization()
{
keyManager.registerKeyListener(inputListener);
refreshShiftClickCustomizationMenus();
clientThread.invoke(this::resetItemDefinitionCache);
}
private void disableCustomization()
{
keyManager.unregisterKeyListener(inputListener);
removeShiftClickCustomizationMenus();
configuringShiftClick = false;
clientThread.invoke(this::resetItemDefinitionCache);
}
private void onWidgetMenuOptionClicked(WidgetMenuOptionClicked event)
{
if (event.getWidget() == WidgetInfo.FIXED_VIEWPORT_INVENTORY_TAB
|| event.getWidget() == WidgetInfo.RESIZABLE_VIEWPORT_INVENTORY_TAB
|| event.getWidget() == WidgetInfo.RESIZABLE_VIEWPORT_BOTTOM_LINE_INVENTORY_TAB)
{
configuringShiftClick = event.getMenuOption().equals(CONFIGURE) && Text.removeTags(event.getMenuTarget()).equals(MENU_TARGET);
refreshShiftClickCustomizationMenus();
}
}
private void onGameStateChanged(GameStateChanged event)
{
if (client.getGameState() != GameState.LOGGED_IN)
{
return;
}
loadConstructionItems(this.getEasyConstructionItems);
}
private void onVarbitChanged(VarbitChanged event)
{
buildingMode = client.getVar(BUILDING_MODE) == 1;
setCastOptions(false);
}
private void onMenuOpened(MenuOpened event)
{
Player localPlayer = client.getLocalPlayer();
if (localPlayer == null)
{
return;
}
if (MiscUtils.getWildernessLevelFrom(client, localPlayer.getWorldLocation()) < 0)
{
return;
}
List<MenuEntry> menuEntries = new ArrayList<>();
for (MenuEntry entry : event.getMenuEntries())
{
String option = Text.removeTags(entry.getOption()).toLowerCase();
if (option.contains("trade with") && this.hideTradeWith)
{
continue;
}
if (option.contains("lookup") && this.hideLookup)
{
continue;
}
if (option.contains("report") && this.hideReport)
{
continue;
}
if (option.contains("examine") && this.hideExamine)
{
continue;
}
if (option.contains("net") && this.hideNet)
{
continue;
}
if (option.contains("bait") && this.hideBait)
{
continue;
}
if (option.contains("destroy"))
{
if (this.hideDestroyRunepouch && entry.getTarget().contains("Rune pouch"))
{
continue;
}
if (this.hideDestroyCoalbag && entry.getTarget().contains("Coal bag"))
{
continue;
}
if (this.hideDestroyHerbsack && entry.getTarget().contains("Herb sack"))
{
continue;
}
if (this.hideDestroyBoltpouch && entry.getTarget().contains("Bolt pouch"))
{
continue;
}
if (this.hideDestroyGembag && entry.getTarget().contains("Gem bag"))
{
continue;
}
}
if (option.contains("drop"))
{
if (this.hideDropRunecraftingPouch && (
entry.getTarget().contains("Small pouch")
|| entry.getTarget().contains("Medium pouch")
|| entry.getTarget().contains("Large pouch")
|| entry.getTarget().contains("Giant pouch")))
{
continue;
}
}
menuEntries.add(entry);
}
client.setMenuEntries(menuEntries.toArray(new MenuEntry[0]));
if (!configuringShiftClick)
{
return;
}
MenuEntry firstEntry = event.getFirstEntry();
if (firstEntry == null)
{
return;
}
int widgetId = firstEntry.getParam1();
if (widgetId != WidgetInfo.INVENTORY.getId())
{
return;
}
int itemId = firstEntry.getIdentifier();
if (itemId == -1)
{
return;
}
ItemDefinition itemComposition = client.getItemDefinition(itemId);
String itemName = itemComposition.getName();
String option = "Use";
int shiftClickActionIndex = itemComposition.getShiftClickActionIndex();
String[] inventoryActions = itemComposition.getInventoryActions();
if (shiftClickActionIndex >= 0 && shiftClickActionIndex < inventoryActions.length)
{
option = inventoryActions[shiftClickActionIndex];
}
MenuEntry[] entries = event.getMenuEntries();
for (MenuEntry entry : entries)
{
if (itemName.equals(Text.removeTags(entry.getTarget())))
{
entry.setType(MenuAction.RUNELITE.getId());
if (option.equals(entry.getOption()))
{
entry.setOption("* " + option);
}
}
}
final MenuEntry resetShiftClickEntry = new MenuEntry();
resetShiftClickEntry.setOption(RESET);
resetShiftClickEntry.setTarget(MENU_TARGET);
resetShiftClickEntry.setIdentifier(itemId);
resetShiftClickEntry.setParam1(widgetId);
resetShiftClickEntry.setType(MenuAction.RUNELITE.getId());
client.setMenuEntries(ArrayUtils.addAll(entries, resetShiftClickEntry));
}
private void onMenuOptionClicked(MenuOptionClicked event)
{
if (event.getMenuAction() != MenuAction.RUNELITE || event.getActionParam1() != WidgetInfo.INVENTORY.getId())
{
return;
}
int itemId = event.getIdentifier();
if (itemId == -1)
{
return;
}
String option = event.getOption();
String target = event.getTarget();
ItemDefinition itemComposition = client.getItemDefinition(itemId);
if (option.equals(RESET) && target.equals(MENU_TARGET))
{
unsetSwapConfig(itemId);
return;
}
if (!itemComposition.getName().equals(Text.removeTags(target)))
{
return;
}
int index = -1;
boolean valid = false;
if (option.equals("Use")) //because "Use" is not in inventoryActions
{
valid = true;
}
else
{
String[] inventoryActions = itemComposition.getInventoryActions();
for (index = 0; index < inventoryActions.length; index++)
{
if (option.equals(inventoryActions[index]))
{
valid = true;
break;
}
}
}
if (valid)
{
setSwapConfig(itemId, index);
}
}
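// Note on the two methods above: -1 is the sentinel index for the built-in
// "Use" option, which never appears in ItemDefinition#getInventoryActions();
// indices >= 0 refer to slots in that array. onPostItemDefinition below
// feeds the stored index back through setShiftClickActionIndex.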
public void onMenuEntryAdded(MenuEntryAdded event)
{
if (client.getGameState() != GameState.LOGGED_IN)
{
return;
}
final int eventId = event.getIdentifier();
final String option = Text.standardize(event.getOption());
final String target = Text.standardize(event.getTarget());
final NPC hintArrowNpc = client.getHintArrowNpc();
entries = client.getMenuEntries();
if (this.getRemoveObjects && !this.getRemovedObjects.equals(""))
{
for (String removed : Text.fromCSV(this.getRemovedObjects))
{
removed = Text.standardize(removed);
if (target.contains("(") && target.split(" \\(")[0].equals(removed))
{
delete(event.getIdentifier());
}
else if (target.contains("->"))
{
String trimmed = target.split("->")[1].trim();
if (trimmed.length() >= removed.length() && trimmed.substring(0, removed.length()).equalsIgnoreCase(removed))
{
delete(event.getIdentifier());
break;
}
}
else if (target.length() >= removed.length() && target.substring(0, removed.length()).equalsIgnoreCase(removed))
{
delete(event.getIdentifier());
break;
}
}
}
if (this.getSwapPuro && isPuroPuro())
{
if (event.getType() == WALK.getId())
{
MenuEntry[] menuEntries = client.getMenuEntries();
MenuEntry menuEntry = menuEntries[menuEntries.length - 1];
menuEntry.setType(MenuAction.WALK.getId() + MENU_ACTION_DEPRIORITIZE_OFFSET);
client.setMenuEntries(menuEntries);
}
else if (option.equalsIgnoreCase("examine"))
{
swap(client, "push-through", option, target);
}
else if (option.equalsIgnoreCase("use"))
{
swap(client, "escape", option, target);
}
}
if (hintArrowNpc != null
&& hintArrowNpc.getIndex() == eventId
&& NPC_MENU_TYPES.contains(MenuAction.of(event.getType())))
{
return;
}
if (this.swapImps && target.contains("impling"))
{
if (client.getItemContainer(InventoryID.BANK) != null)
{
bankItemNames = new ArrayList<>();
for (Item i : Objects.requireNonNull(client.getItemContainer(InventoryID.BANK)).getItems())
{
bankItemNames.add(client.getItemDefinition((i.getId())).getName());
}
}
List<String> invItemNames = new ArrayList<>();
if (target.equals("gourmet impling jar"))
{
if (client.getItemContainer(InventoryID.INVENTORY) != null)
{
for (Item i : Objects.requireNonNull(client.getItemContainer(InventoryID.INVENTORY)).getItems())
{
invItemNames.add(client.getItemDefinition((i.getId())).getName());
}
if ((invItemNames.contains("Clue scroll (easy)") || bankItemNames.contains("Clue scroll (easy)")))
{
menuManager.addSwap("loot", target, "use");
}
else
{
menuManager.removeSwaps(target);
}
}
}
switch (target)
{
case "eclectic impling jar":
if (client.getItemContainer(InventoryID.INVENTORY) != null)
{
for (Item i : Objects.requireNonNull(client.getItemContainer(InventoryID.INVENTORY)).getItems())
{
invItemNames.add(client.getItemDefinition((i.getId())).getName());
}
if ((invItemNames.contains("Clue scroll (medium)") || bankItemNames.contains("Clue scroll (medium)")))
{
menuManager.addSwap("loot", target, "use");
}
else
{
menuManager.removeSwaps(target);
}
}
break;
case "magpie impling jar":
case "nature impling jar":
case "ninja impling jar":
if (client.getItemContainer(InventoryID.INVENTORY) != null)
{
for (Item i : Objects.requireNonNull(client.getItemContainer(InventoryID.INVENTORY)).getItems())
{
invItemNames.add(client.getItemDefinition((i.getId())).getName());
}
if ((invItemNames.contains("Clue scroll (hard)") || bankItemNames.contains("Clue scroll (hard)")))
{
menuManager.addSwap("loot", target, "use");
}
else
{
menuManager.removeSwaps(target);
}
}
break;
case "crystal impling jar":
case "dragon impling jar":
if (client.getItemContainer(InventoryID.INVENTORY) != null)
{
for (Item i : Objects.requireNonNull(client.getItemContainer(InventoryID.INVENTORY)).getItems())
{
invItemNames.add(client.getItemDefinition((i.getId())).getName());
}
if ((invItemNames.contains("Clue scroll (elite)") || bankItemNames.contains("Clue scroll (elite)")))
{
menuManager.addSwap("loot", target, "use");
}
else
{
menuManager.removeSwaps(target);
}
}
break;
}
}
if (this.shiftClickCustomization && shiftModifier && !option.equals("use"))
{
Integer customOption = getSwapConfig(eventId);
if (customOption != null && customOption == -1)
{
menuManager.addPriorityEntry("Use");
}
else
{
menuManager.removePriorityEntry("Use");
}
}
}
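// In the Puro-Puro branch above, adding MENU_ACTION_DEPRIORITIZE_OFFSET to
// the WALK action id pushes Walk below the other menu entries, letting the
// "push-through" and "escape" swaps win the left click inside the minigame
// region.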
private void onPostItemDefinition(PostItemDefinition event)
{
ItemDefinition itemComposition = event.getItemDefinition();
Integer option = getSwapConfig(itemComposition.getId());
if (option != null)
{
itemComposition.setShiftClickActionIndex(option);
}
}
private void onFocusChanged(FocusChanged event)
{
if (!event.isFocused())
{
shiftModifier = false;
}
}
private void removeShiftClickCustomizationMenus()
{
menuManager.removeManagedCustomMenu(FIXED_INVENTORY_TAB_CONFIGURE);
menuManager.removeManagedCustomMenu(FIXED_INVENTORY_TAB_SAVE);
menuManager.removeManagedCustomMenu(RESIZABLE_BOTTOM_LINE_INVENTORY_TAB_CONFIGURE);
menuManager.removeManagedCustomMenu(RESIZABLE_BOTTOM_LINE_INVENTORY_TAB_SAVE);
menuManager.removeManagedCustomMenu(RESIZABLE_INVENTORY_TAB_CONFIGURE);
menuManager.removeManagedCustomMenu(RESIZABLE_INVENTORY_TAB_SAVE);
}
private void refreshShiftClickCustomizationMenus()
{
removeShiftClickCustomizationMenus();
if (configuringShiftClick)
{
menuManager.addManagedCustomMenu(FIXED_INVENTORY_TAB_SAVE);
menuManager.addManagedCustomMenu(RESIZABLE_BOTTOM_LINE_INVENTORY_TAB_SAVE);
menuManager.addManagedCustomMenu(RESIZABLE_INVENTORY_TAB_SAVE);
}
else
{
menuManager.addManagedCustomMenu(FIXED_INVENTORY_TAB_CONFIGURE);
menuManager.addManagedCustomMenu(RESIZABLE_BOTTOM_LINE_INVENTORY_TAB_CONFIGURE);
menuManager.addManagedCustomMenu(RESIZABLE_INVENTORY_TAB_CONFIGURE);
}
}
private void loadCustomSwaps(String config)
{
Map<AbstractComparableEntry, AbstractComparableEntry> tmp = new HashMap<>();
if (!Strings.isNullOrEmpty(config))
{
StringBuilder sb = new StringBuilder();
for (String str : config.split("\n"))
{
if (!str.startsWith("//"))
{
sb.append(str).append("\n");
}
}
Map<String, String> split = NEWLINE_SPLITTER.withKeyValueSeparator(':').split(sb);
for (Map.Entry<String, String> entry : split.entrySet())
{
String from = entry.getKey();
String to = entry.getValue();
String[] splitFrom = Text.standardize(from).split(",");
String optionFrom = splitFrom[0].trim();
String targetFrom;
if (splitFrom.length == 1)
{
targetFrom = "";
}
else
{
targetFrom = splitFrom[1].trim();
}
AbstractComparableEntry fromEntry = newBaseComparableEntry(optionFrom, targetFrom);
String[] splitTo = Text.standardize(to).split(",");
String optionTo = splitTo[0].trim();
String targetTo;
if (splitTo.length == 1)
{
targetTo = "";
}
else
{
targetTo = splitTo[1].trim();
}
AbstractComparableEntry toEntry = newBaseComparableEntry(optionTo, targetTo);
tmp.put(fromEntry, toEntry);
}
}
for (Map.Entry<AbstractComparableEntry, AbstractComparableEntry> e : customSwaps.entrySet())
{
AbstractComparableEntry key = e.getKey();
AbstractComparableEntry value = e.getValue();
menuManager.removeSwap(key, value);
}
customSwaps.clear();
customSwaps.putAll(tmp);
for (Map.Entry<AbstractComparableEntry, AbstractComparableEntry> entry : customSwaps.entrySet())
{
AbstractComparableEntry a1 = entry.getKey();
AbstractComparableEntry a2 = entry.getValue();
menuManager.addSwap(a1, a2);
}
}
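// Illustrative sketch of the syntax loadCustomSwaps expects (the names are
// invented for the example, not taken from any documentation): one "from:to"
// mapping per line, each side being "option" or "option,target", with "//"
// lines treated as comments, e.g.
//
//   // left-click banking at some hypothetical NPC
//   talk-to,banker:bank,banker
//   climb:climb-up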
private void addSwaps()
{
if (this.getBurningAmulet)
{
menuManager.addPriorityEntry(this.getBurningAmuletMode.toString(), "burning amulet");
}
if (this.getWithdrawOne)
{
Text.fromCSV(this.getWithdrawOneItems).forEach(item ->
{
menuManager.addPriorityEntry(newBankComparableEntry("Withdraw-1", item)).setPriority(10);
menuManager.addPriorityEntry(newBankComparableEntry("Deposit-1", item)).setPriority(10);
});
}
if (this.getWithdrawFive)
{
Text.fromCSV(this.getWithdrawFiveItems).forEach(item ->
{
menuManager.addPriorityEntry(newBankComparableEntry("Withdraw-5", item)).setPriority(10);
menuManager.addPriorityEntry(newBankComparableEntry("Deposit-5", item)).setPriority(10);
});
}
if (this.getWithdrawTen)
{
Text.fromCSV(this.getWithdrawTenItems).forEach(item ->
{
menuManager.addPriorityEntry(newBankComparableEntry("Withdraw-10", item)).setPriority(10);
menuManager.addPriorityEntry(newBankComparableEntry("Deposit-10", item)).setPriority(10);
});
}
if (this.getWithdrawX)
{
Text.fromCSV(this.getWithdrawXItems).forEach(item ->
{
menuManager.addPriorityEntry(newBankComparableEntry("Withdraw-" + this.getWithdrawXAmount, item)).setPriority(10);
menuManager.addPriorityEntry(newBankComparableEntry("Deposit-" + this.getWithdrawXAmount, item)).setPriority(10);
});
}
if (this.getWithdrawAll)
{
Text.fromCSV(this.getWithdrawAllItems).forEach(item ->
{
menuManager.addPriorityEntry(newBankComparableEntry("Withdraw-All", item)).setPriority(10);
menuManager.addPriorityEntry(newBankComparableEntry("Deposit-All", item)).setPriority(10);
});
}
if (this.getSwapBuyOne)
{
Text.fromCSV(this.getBuyOneItems).forEach(item -> menuManager.addPriorityEntry("Buy 1", item).setPriority(100));
}
if (this.getSwapBuyFive)
{
Text.fromCSV(this.getBuyFiveItems).forEach(item -> menuManager.addPriorityEntry("Buy 5", item).setPriority(100));
}
if (this.getSwapBuyTen)
{
Text.fromCSV(this.getBuyTenItems).forEach(item -> menuManager.addPriorityEntry("Buy 10", item).setPriority(100));
}
if (this.getSwapBuyFifty)
{
Text.fromCSV(this.getBuyFiftyItems).forEach(item -> menuManager.addPriorityEntry("Buy 50", item).setPriority(100));
}
if (this.getSwapSellOne)
{
Text.fromCSV(this.getSellOneItems).forEach(item -> menuManager.addPriorityEntry("Sell 1", item).setPriority(100));
}
if (this.getSwapSellFive)
{
Text.fromCSV(this.getSellFiveItems).forEach(item -> menuManager.addPriorityEntry("Sell 5", item).setPriority(100));
}
if (this.getSwapSellTen)
{
Text.fromCSV(this.getSellTenItems).forEach(item -> menuManager.addPriorityEntry("Sell 10", item).setPriority(100));
}
if (this.getSwapSellFifty)
{
Text.fromCSV(this.getSellFiftyItems).forEach(item -> menuManager.addPriorityEntry("Sell 50", item).setPriority(100));
}
if (this.getSwapTanning)
{
menuManager.addPriorityEntry("Tan All");
}
if (this.getSwapSawmill)
{
menuManager.addPriorityEntry("Buy-plank", "Sawmill operator");
}
if (this.getSwapSawmillPlanks)
{
//Not much we can do for this one, Buy all is the only thing, there is no target.
menuManager.addPriorityEntry("Buy All").setPriority(10);
}
if (this.getSwapArdougneCape)
{
menuManager.addPriorityEntry("Kandarin Monastery");
menuManager.addPriorityEntry("Monastery Teleport");
}
if (this.getSwapCraftingCape)
{
menuManager.addPriorityEntry("Teleport", "Crafting cape");
menuManager.addPriorityEntry("Teleport", "Crafting cape(t)");
}
if (this.getSwapConstructionCape)
{
menuManager.addPriorityEntry(this.constructionCapeMode.toString(), "Construct. cape");
menuManager.addPriorityEntry(this.constructionCapeMode.toString(), "Construct. cape(t)");
}
if (this.getSwapMagicCape)
{
menuManager.addPriorityEntry("Spellbook", "Magic cape");
menuManager.addPriorityEntry("Spellbook", "Magic cape(t)");
}
if (this.getSwapExplorersRing)
{
menuManager.addPriorityEntry("Teleport", "Explorer's ring 2");
menuManager.addPriorityEntry("Teleport", "Explorer's ring 3");
menuManager.addPriorityEntry("Teleport", "Explorer's ring 4");
}
if (this.swapPickpocket)
{
menuManager.addPriorityEntry("Pickpocket").setPriority(1);
}
if (this.swapHardWoodGrove)
{
menuManager.addPriorityEntry("Send-parcel", "Rionasta");
}
if (this.swapBankExchange)
{
menuManager.addPriorityEntry("Bank").setPriority(1);
menuManager.addPriorityEntry("Exchange").setPriority(10);
}
if (this.swapContract)
{
menuManager.addPriorityEntry("Contract").setPriority(10);
}
if (this.swapInteract)
{
menuManager.addPriorityEntry("Repairs").setPriority(10);
menuManager.addPriorityEntry("Claim-slime").setPriority(10);
menuManager.addPriorityEntry("Decant").setPriority(10);
menuManager.addPriorityEntry("Claim").setPriority(10);
menuManager.addPriorityEntry("Heal").setPriority(10);
menuManager.addPriorityEntry("Help").setPriority(10);
}
if (this.swapAssignment)
{
menuManager.addPriorityEntry("Assignment");
}
if (this.swapPlank)
{
menuManager.addPriorityEntry("Buy-plank").setPriority(10);
}
if (this.swapTrade)
{
menuManager.addPriorityEntry("Trade").setPriority(1);
menuManager.addPriorityEntry("Trade-with").setPriority(1);
menuManager.addPriorityEntry("Shop").setPriority(1);
}
if (this.swapMinigame)
{
menuManager.addPriorityEntry("Story");
menuManager.addPriorityEntry("Escort");
menuManager.addPriorityEntry("Dream");
menuManager.addPriorityEntry("Start-minigame");
}
if (this.swapTravel)
{
menuManager.addPriorityEntry("Travel");
menuManager.addPriorityEntry("Pay-fare");
menuManager.addPriorityEntry("Charter");
menuManager.addPriorityEntry("Take-boat");
menuManager.addPriorityEntry("Fly");
menuManager.addPriorityEntry("Jatizso");
menuManager.addPriorityEntry("Neitiznot");
menuManager.addPriorityEntry("Rellekka");
menuManager.addPriorityEntry("Follow", "Elkoy").setPriority(10);
menuManager.addPriorityEntry("Transport");
menuManager.addPriorityEntry("Teleport", "Mage of zamorak").setPriority(10);
}
if (this.swapPay)
{
menuManager.addPriorityEntry("Pay");
menuManager.addPriorityEntry("Pay (");
}
if (this.swapQuick)
{
menuManager.addPriorityEntry("Quick-travel");
}
if (this.swapEnchant)
{
menuManager.addPriorityEntry("Enchant");
}
if (this.swapWildernessLever)
{
menuManager.addPriorityEntry("Edgeville", "Lever");
}
if (this.swapMetamorphosis)
{
menuManager.addPriorityEntry("Metamorphosis", "Baby chinchompa");
}
if (this.swapStun)
{
menuManager.addPriorityEntry("Stun", "Hoop snake");
}
if (this.swapTravel)
{
menuManager.addPriorityEntry("Pay-toll(2-ecto)", "Energy barrier");
menuManager.addPriorityEntry("Pay-toll(10gp)", "Gate");
menuManager.addPriorityEntry("Travel", "Trapdoor");
}
if (this.swapHarpoon)
{
menuManager.addPriorityEntry("Harpoon");
}
if (this.swapBoxTrap)
{
menuManager.addPriorityEntry("Reset", "Box trap");
menuManager.addPriorityEntry("Lay", "Box trap");
menuManager.addPriorityEntry("Activate", "Box trap");
}
if (this.swapChase)
{
menuManager.addPriorityEntry("Chase");
}
if (this.swapBirdhouseEmpty)
{
menuManager.addPriorityEntry("Empty", "Birdhouse");
}
if (this.swapQuick)
{
menuManager.addPriorityEntry("Quick-enter");
menuManager.addPriorityEntry("Quick-start");
menuManager.addPriorityEntry("Quick-pass");
menuManager.addPriorityEntry("Quick-open");
menuManager.addPriorityEntry("Quick-leave");
}
if (this.swapAdmire)
{
menuManager.addPriorityEntry("Teleport", "Mounted Strength Cape").setPriority(10);
menuManager.addPriorityEntry("Teleport", "Mounted Construction Cape").setPriority(10);
menuManager.addPriorityEntry("Teleport", "Mounted Crafting Cape").setPriority(10);
menuManager.addPriorityEntry("Teleport", "Mounted Hunter Cape").setPriority(10);
menuManager.addPriorityEntry("Teleport", "Mounted Fishing Cape").setPriority(10);
menuManager.addPriorityEntry("Spellbook", "Mounted Magic Cape");
menuManager.addPriorityEntry("Perks", "Mounted Max Cape");
}
if (this.swapPrivate)
{
menuManager.addPriorityEntry("Private");
}
if (this.swapPick)
{
menuManager.addPriorityEntry("Pick-lots");
}
if (this.swapSearch)
{
menuManager.addPriorityEntry("Search");
}
if (this.swapRogueschests)
{
menuManager.addPriorityEntry("Search for traps");
}
if (this.rockCake)
{
menuManager.addPriorityEntry("Guzzle", "Dwarven rock cake");
}
if (this.swapTeleportItem)
{
menuManager.addSwap("Wear", "", "Rub");
menuManager.addSwap("Wield", "", "Rub");
menuManager.addSwap("Wear", "", "Teleport");
menuManager.addSwap("Wield", "", "Teleport");
}
if (this.swapCoalBag)
{
menuManager.addPriorityEntry("Empty", "Coal bag");
}
if (this.swapBones)
{
menuManager.addSwap("Bury", "", "Use");
}
if (this.swapNexus)
{
menuManager.addPriorityEntry("Teleport menu", "Portal nexus");
}
switch (this.swapFairyRingMode)
{
case OFF:
case ZANARIS:
menuManager.removeSwaps("Fairy ring");
menuManager.removeSwaps("Tree");
break;
case CONFIGURE:
menuManager.addPriorityEntry("Configure", "Fairy ring");
break;
case LAST_DESTINATION:
menuManager.addPriorityEntry("Last-destination");
break;
}
switch (this.swapOccultMode)
{
case LUNAR:
menuManager.addPriorityEntry("Lunar", "Altar of the Occult");
break;
case ANCIENT:
menuManager.addPriorityEntry("Ancient", "Altar of the Occult");
break;
case ARCEUUS:
menuManager.addPriorityEntry("Arceuus", "Altar of the Occult");
break;
}
switch (this.swapObeliskMode)
{
case SET_DESTINATION:
menuManager.addPriorityEntry("Set destination", "Obelisk");
break;
case TELEPORT_TO_DESTINATION:
menuManager.addPriorityEntry("Teleport to destination", "Obelisk");
break;
}
switch (this.swapHomePortalMode)
{
case HOME:
menuManager.addPriorityEntry("Home");
break;
case BUILD_MODE:
menuManager.addPriorityEntry("Build mode");
break;
case FRIENDS_HOUSE:
menuManager.addPriorityEntry("Friend's house");
break;
}
if (this.swapHardWoodGrove)
{
menuManager.addPriorityEntry("Quick-pay(100)", "Hardwood grove doors");
}
if (this.getCombatBracelet)
{
menuManager.addPriorityEntry(this.getCombatBraceletMode.toString());
}
if (this.getGamesNecklace)
{
menuManager.addPriorityEntry(this.getGamesNecklaceMode.toString());
}
if (this.getDuelingRing)
{
menuManager.addPriorityEntry(this.getDuelingRingMode.toString());
}
if (this.getGlory)
{
menuManager.addPriorityEntry(this.getGloryMode.toString());
}
if (this.getSkillsNecklace)
{
menuManager.addPriorityEntry(this.getSkillsNecklaceMode.toString());
}
if (this.getNecklaceofPassage)
{
menuManager.addPriorityEntry(this.getNecklaceofPassageMode.toString());
}
if (this.getDigsitePendant)
{
menuManager.addPriorityEntry(this.getDigsitePendantMode.toString());
}
if (this.getSlayerRing)
{
menuManager.addPriorityEntry(this.getSlayerRingMode.toString());
}
if (this.getXericsTalisman)
{
menuManager.addPriorityEntry(this.getXericsTalismanMode.toString());
}
if (this.getRingofWealth)
{
menuManager.addPriorityEntry(this.getRingofWealthMode.toString());
}
if (this.swapMax)
{
menuManager.addPriorityEntry(this.maxMode.toString(), "max cape");
}
if (this.swapQuestCape)
{
menuManager.addPriorityEntry(this.questCapeMode.toString(), "quest point cape");
}
}
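// removeSwaps() below is meant to stay the mirror image of addSwaps():
// every priority entry or swap registered above must be deregistered there,
// otherwise disabling an option would leak its left-click override.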
private void removeSwaps()
{
Text.fromCSV(this.getWithdrawOneItems).forEach(item ->
{
menuManager.removePriorityEntry("Withdraw-1", item);
menuManager.removePriorityEntry("Deposit-1", item);
});
Text.fromCSV(this.getWithdrawFiveItems).forEach(item ->
{
menuManager.removePriorityEntry("Withdraw-5", item);
menuManager.removePriorityEntry("Deposit-5", item);
});
Text.fromCSV(this.getWithdrawTenItems).forEach(item ->
{
menuManager.removePriorityEntry("Withdraw-10", item);
menuManager.removePriorityEntry("Deposit-10", item);
});
Text.fromCSV(this.getWithdrawXItems).forEach(item ->
{
menuManager.removePriorityEntry("Withdraw-" + this.getWithdrawXAmount, item);
menuManager.removePriorityEntry("Deposit-" + this.getWithdrawXAmount, item);
});
Text.fromCSV(this.getWithdrawAllItems).forEach(item ->
{
menuManager.removePriorityEntry("Withdraw-All", item);
menuManager.removePriorityEntry("Deposit-All", item);
});
Text.fromCSV(this.getBuyOneItems).forEach(item -> menuManager.removePriorityEntry("Buy 1", item));
Text.fromCSV(this.getBuyFiveItems).forEach(item -> menuManager.removePriorityEntry("Buy 5", item));
Text.fromCSV(this.getBuyTenItems).forEach(item -> menuManager.removePriorityEntry("Buy 10", item));
Text.fromCSV(this.getBuyFiftyItems).forEach(item -> menuManager.removePriorityEntry("Buy 50", item));
Text.fromCSV(this.getSellOneItems).forEach(item -> menuManager.removePriorityEntry("Sell 1", item));
Text.fromCSV(this.getSellFiveItems).forEach(item -> menuManager.removePriorityEntry("Sell 5", item));
Text.fromCSV(this.getSellTenItems).forEach(item -> menuManager.removePriorityEntry("Sell 10", item));
Text.fromCSV(this.getSellFiftyItems).forEach(item -> menuManager.removePriorityEntry("Sell 50", item));
menuManager.removeSwaps("Fairy ring");
menuManager.removeSwaps("Tree");
menuManager.removePriorityEntry(this.getGloryMode.toString());
menuManager.removePriorityEntry(this.getGloryMode.toString());
menuManager.removePriorityEntry(this.getSkillsNecklaceMode.toString());
menuManager.removePriorityEntry(this.getNecklaceofPassageMode.toString());
menuManager.removePriorityEntry(this.getDigsitePendantMode.toString());
menuManager.removePriorityEntry(this.getSlayerRingMode.toString());
menuManager.removePriorityEntry(this.getSlayerRingMode.toString());
menuManager.removePriorityEntry(this.getXericsTalismanMode.toString());
menuManager.removePriorityEntry(this.getRingofWealthMode.toString());
menuManager.removePriorityEntry(this.maxMode.toString(), "max cape");
menuManager.removePriorityEntry(this.questCapeMode.toString(), "quest point cape");
menuManager.removePriorityEntry("Smith All");
menuManager.removePriorityEntry("Smith All Sets");
menuManager.removePriorityEntry("Tan All");
menuManager.removePriorityEntry("Buy-plank", "Sawmill operator");
menuManager.removePriorityEntry("Buy All");
menuManager.removePriorityEntry("Kandarin Monastery");
menuManager.removePriorityEntry("Monastery Teleport");
menuManager.removePriorityEntry("Teleport", "Crafting cape");
menuManager.removePriorityEntry("Teleport", "Crafting cape(t)");
menuManager.removePriorityEntry("Tele to poh", "Construct. cape");
menuManager.removePriorityEntry("Tele to poh", "Construct. cape(t)");
menuManager.removePriorityEntry("Spellbook", "Magic cape");
menuManager.removePriorityEntry("Spellbook", "Magic cape(t)");
menuManager.removePriorityEntry("Teleport", "Explorer's ring 2");
menuManager.removePriorityEntry("Teleport", "Explorer's ring 3");
menuManager.removePriorityEntry("Teleport", "Explorer's ring 4");
menuManager.removePriorityEntry("Pickpocket");
menuManager.removePriorityEntry("Send-parcel", "Rionasta");
menuManager.removePriorityEntry("Bank");
menuManager.removePriorityEntry("Exchange");
menuManager.removePriorityEntry("Contract");
menuManager.removePriorityEntry("Repairs");
menuManager.removePriorityEntry("Claim-slime");
menuManager.removePriorityEntry("Decant");
menuManager.removePriorityEntry("Claim");
menuManager.removePriorityEntry("Heal");
menuManager.removePriorityEntry("Help");
menuManager.removePriorityEntry("Assignment");
menuManager.removePriorityEntry("Buy-plank");
menuManager.removePriorityEntry("Trade");
menuManager.removePriorityEntry("Trade-with");
menuManager.removePriorityEntry("Shop");
menuManager.removePriorityEntry("Story");
menuManager.removePriorityEntry("Escort");
menuManager.removePriorityEntry("Dream");
menuManager.removePriorityEntry("Start-minigame");
menuManager.removePriorityEntry("Travel");
menuManager.removePriorityEntry("Pay-fare");
menuManager.removePriorityEntry("Charter");
menuManager.removePriorityEntry("Take-boat");
menuManager.removePriorityEntry("Fly");
menuManager.removePriorityEntry("Jatizso");
menuManager.removePriorityEntry("Neitiznot");
menuManager.removePriorityEntry("Rellekka");
menuManager.removePriorityEntry("Follow");
menuManager.removePriorityEntry("Transport");
menuManager.removePriorityEntry("Teleport", "Mage of zamorak");
menuManager.removePriorityEntry("Pay");
menuManager.removePriorityEntry("Pay (");
menuManager.removePriorityEntry("Quick-travel");
menuManager.removePriorityEntry("Enchant");
menuManager.removePriorityEntry("Edgeville", "Lever");
menuManager.removePriorityEntry("Metamorphosis", "Baby chinchompa");
menuManager.removePriorityEntry("Stun", "Hoop snake");
menuManager.removePriorityEntry("Pay-toll(2-ecto)", "Energy barrier");
menuManager.removePriorityEntry("Pay-toll(10gp)", "Gate");
menuManager.removePriorityEntry("Travel", "Trapdoor");
menuManager.removePriorityEntry("Harpoon");
menuManager.removePriorityEntry("Reset", "Box trap");
menuManager.removePriorityEntry("Lay", "Box trap");
menuManager.removePriorityEntry("Activate", "Box trap");
menuManager.removePriorityEntry("Chase");
menuManager.removePriorityEntry("Empty", "Birdhouse");
menuManager.removePriorityEntry("Quick-enter");
menuManager.removePriorityEntry("Quick-start");
menuManager.removePriorityEntry("Quick-pass");
menuManager.removePriorityEntry("Quick-open");
menuManager.removePriorityEntry("Quick-enter");
menuManager.removePriorityEntry("Quick-leave");
menuManager.removePriorityEntry("Teleport", "Mounted Strength Cape");
menuManager.removePriorityEntry("Teleport", "Mounted Construction Cape");
menuManager.removePriorityEntry("Teleport", "Mounted Crafting Cape");
menuManager.removePriorityEntry("Teleport", "Mounted Hunter Cape");
menuManager.removePriorityEntry("Teleport", "Mounted Fishing Cape");
menuManager.removePriorityEntry("Spellbook", "Mounted Magic Cape");
menuManager.removePriorityEntry("Perks", "Mounted Max Cape");
menuManager.removePriorityEntry("Private");
menuManager.removePriorityEntry("Pick-lots");
menuManager.removePriorityEntry("Search");
menuManager.removePriorityEntry("Search for traps");
menuManager.removePriorityEntry("Guzzle", "Dwarven rock cake");
menuManager.removeSwap("Wear", "", "Rub");
menuManager.removeSwap("Wield", "", "Rub");
menuManager.removeSwap("Wear", "", "Teleport");
menuManager.removeSwap("Wield", "", "Teleport");
switch (this.swapFairyRingMode)
{
case OFF:
case ZANARIS:
menuManager.removeSwaps("Fairy ring");
menuManager.removeSwaps("Tree");
break;
case CONFIGURE:
menuManager.removePriorityEntry("Configure", "Fairy ring");
break;
case LAST_DESTINATION:
menuManager.removePriorityEntry("Last-destination");
break;
}
switch (this.swapOccultMode)
{
case LUNAR:
menuManager.removePriorityEntry("Lunar", "Altar of the Occult");
break;
case ANCIENT:
menuManager.removePriorityEntry("Ancient", "Altar of the Occult");
break;
case ARCEUUS:
menuManager.removePriorityEntry("Arceuus", "Altar of the Occult");
break;
}
switch (this.swapObeliskMode)
{
case SET_DESTINATION:
menuManager.removePriorityEntry("Set destination", "Obelisk");
break;
case TELEPORT_TO_DESTINATION:
menuManager.removePriorityEntry("Teleport to destination", "Obelisk");
break;
}
switch (this.swapHomePortalMode)
{
case HOME:
menuManager.removePriorityEntry("Home");
break;
case BUILD_MODE:
menuManager.removePriorityEntry("Build mode");
break;
case FRIENDS_HOUSE:
menuManager.removePriorityEntry("Friend's house");
break;
}
}
private void delete(int target)
{
// Iterate backwards so removing an element never shifts the indices of
// entries that have not been inspected yet; the counter must only be
// decremented once per iteration, or adjacent matches would be skipped.
for (int i = entries.length - 1; i >= 0; i--)
{
if (entries[i].getIdentifier() == target)
{
entries = ArrayUtils.remove(entries, i);
}
}
client.setMenuEntries(entries);
}
private boolean isPuroPuro()
{
Player player = client.getLocalPlayer();
if (player == null)
{
return false;
}
else
{
WorldPoint location = player.getWorldLocation();
return location.getRegionID() == PURO_PURO_REGION_ID;
}
}
private void loadConstructionItems(String from)
{
if (client.getGameState() != GameState.LOGGED_IN
|| (Strings.isNullOrEmpty(from) && leftClickConstructionItems.isEmpty()))
{
return;
}
if (!leftClickConstructionItems.isEmpty())
{
leftClickConstructionItems.forEach(item ->
{
menuManager.removePriorityEntry("build", item);
menuManager.removePriorityEntry("remove", item);
});
leftClickConstructionItems.clear();
}
if (this.getEasyConstruction && !Strings.isNullOrEmpty(from) && buildingMode)
{
Text.fromCSV(from).forEach(item ->
{
if (leftClickConstructionItems.contains(item))
{
return;
}
menuManager.addPriorityEntry("build", item);
menuManager.addPriorityEntry("remove", item);
leftClickConstructionItems.add(item);
});
}
}
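// The construction list is a plain CSV of furniture names (for example, a
// hypothetical "larder,repair bench"); leftClickConstructionItems tracks
// what was registered so stale priority entries are removed before the
// current list is applied, and new entries are only added while
// buildingMode is active.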
void startShift()
{
if (!this.swapClimbUpDown)
{
return;
}
menuManager.addPriorityEntry("climb-up");
}
void stopShift()
{
menuManager.removePriorityEntry("climb-up");
}
void startControl()
{
if (!this.swapClimbUpDown)
{
return;
}
menuManager.addPriorityEntry("climb-down");
}
void stopControl()
{
menuManager.removePriorityEntry("climb-down");
}
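// The four hooks above pair keyboard modifiers with ladder menus: holding
// shift prioritises "climb-up", holding control prioritises "climb-down",
// and releasing the key removes the override again.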
private void setCastOptions(boolean force)
{
clientThread.invoke(() ->
{
boolean tmpInCoxRaid = client.getVar(Varbits.IN_RAID) == 1;
if (tmpInCoxRaid != inCoxRaid || force)
{
if (tmpInCoxRaid && this.hideCastCoX)
{
client.setHideFriendCastOptions(true);
client.setHideClanmateCastOptions(true);
client.setUnhiddenCasts(this.hideCastIgnoredCoX);
}
inCoxRaid = tmpInCoxRaid;
}
boolean tmpInTobRaid = client.getVar(Varbits.THEATRE_OF_BLOOD) == 2;
if (tmpInTobRaid != inTobRaid || force)
{
if (tmpInTobRaid && this.hideCastToB)
{
client.setHideFriendCastOptions(true);
client.setHideClanmateCastOptions(true);
client.setUnhiddenCasts(this.hideCastIgnoredToB);
}
inTobRaid = tmpInTobRaid;
}
if (!inCoxRaid && !inTobRaid)
{
resetCastOptions();
}
});
}
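// setCastOptions hides friend/clanmate Cast options while the player is in
// CoX (Varbits.IN_RAID == 1) or ToB (THEATRE_OF_BLOOD == 2, as tested
// above), except for names whitelisted via setUnhiddenCasts;
// resetCastOptions restores them, deferring to the PvP tools plugin in the
// wilderness and on PvP worlds.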
private void resetCastOptions()
{
clientThread.invoke(() ->
{
if (client.getVar(Varbits.IN_WILDERNESS) == 1 || WorldType.isAllPvpWorld(client.getWorldType()) && pluginManager.isPluginEnabled(pvpTools) && pvpToolsConfig.hideCast())
{
pvpTools.setCastOptions();
}
else
{
client.setHideFriendCastOptions(false);
client.setHideClanmateCastOptions(false);
}
});
}
private void updateConfig()
{
this.getWithdrawOne = config.getWithdrawOne();
this.getWithdrawOneItems = config.getWithdrawOneItems();
this.getWithdrawFive = config.getWithdrawFive();
this.getWithdrawFiveItems = config.getWithdrawFiveItems();
this.getWithdrawTen = config.getWithdrawTen();
this.getWithdrawTenItems = config.getWithdrawTenItems();
this.getWithdrawX = config.getWithdrawX();
this.getWithdrawXAmount = config.getWithdrawXAmount();
this.getWithdrawXItems = config.getWithdrawXItems();
this.getWithdrawAll = config.getWithdrawAll();
this.getWithdrawAllItems = config.getWithdrawAllItems();
this.swapMax = config.swapMax();
this.maxMode = config.maxMode();
this.getSwapArdougneCape = config.getSwapArdougneCape();
this.getSwapConstructionCape = config.getSwapConstructionCape();
this.constructionCapeMode = config.constructionCapeMode();
this.getSwapCraftingCape = config.getSwapCraftingCape();
this.getSwapMagicCape = config.getSwapMagicCape();
this.getSwapExplorersRing = config.getSwapExplorersRing();
this.swapAdmire = config.swapAdmire();
this.swapQuestCape = config.swapQuestCape();
this.questCapeMode = config.questCapeMode();
this.configCustomSwaps = config.customSwaps();
this.shiftClickCustomization = config.shiftClickCustomization();
this.swapCoalBag = config.swapCoalBag();
this.swapBirdhouseEmpty = config.swapBirdhouseEmpty();
this.swapBones = config.swapBones();
this.swapChase = config.swapChase();
this.swapHarpoon = config.swapHarpoon();
this.swapOccultMode = config.swapOccultMode();
this.swapHomePortalMode = config.swapHomePortalMode();
this.swapPrivate = config.swapPrivate();
this.swapPick = config.swapPick();
this.swapQuick = config.swapQuick();
this.swapBoxTrap = config.swapBoxTrap();
this.rockCake = config.rockCake();
this.swapRogueschests = config.swapRogueschests();
this.swapClimbUpDown = config.swapClimbUpDown();
this.swapStun = config.swapStun();
this.swapSearch = config.swapSearch();
this.swapHardWoodGrove = config.swapHardWoodGrove();
this.getRemoveObjects = config.getRemoveObjects();
this.getRemovedObjects = config.getRemovedObjects();
this.swapImps = config.swapImps();
this.charterOption = config.charterOption();
this.getSwapBuyOne = config.getSwapBuyOne();
this.getBuyOneItems = config.getBuyOneItems();
this.getSwapBuyFive = config.getSwapBuyFive();
this.getBuyFiveItems = config.getBuyFiveItems();
this.getSwapBuyTen = config.getSwapBuyTen();
this.getBuyTenItems = config.getBuyTenItems();
this.getSwapBuyFifty = config.getSwapBuyFifty();
this.getBuyFiftyItems = config.getBuyFiftyItems();
this.getSwapSellOne = config.getSwapSellOne();
this.getSellOneItems = config.getSellOneItems();
this.getSwapSellFive = config.getSwapSellFive();
this.getSellFiveItems = config.getSellFiveItems();
this.getSwapSellTen = config.getSwapSellTen();
this.getSellTenItems = config.getSellTenItems();
this.getSwapSellFifty = config.getSwapSellFifty();
this.getSellFiftyItems = config.getSellFiftyItems();
this.getEasyConstruction = config.getEasyConstruction();
this.getEasyConstructionItems = config.getEasyConstructionItems();
this.getSwapTanning = config.getSwapTanning();
this.getSwapSawmill = config.getSwapSawmill();
this.getSwapSawmillPlanks = config.getSwapSawmillPlanks();
this.getSwapPuro = config.getSwapPuro();
this.swapAssignment = config.swapAssignment();
this.swapBankExchange = config.swapBankExchange();
this.swapContract = config.swapContract();
this.swapInteract = config.swapInteract();
this.swapPickpocket = config.swapPickpocket();
this.swapPay = config.swapPay();
this.swapAbyssTeleport = config.swapAbyssTeleport();
this.swapTrade = config.swapTrade();
this.swapTravel = config.swapTravel();
this.swapMinigame = config.swapMinigame();
this.swapPlank = config.swapPlank();
this.swapMetamorphosis = config.swapMetamorphosis();
this.swapEnchant = config.swapEnchant();
this.swapFairyRingMode = config.swapFairyRingMode();
this.swapObeliskMode = config.swapObeliskMode();
this.swapTeleportItem = config.swapTeleportItem();
this.swapWildernessLever = config.swapWildernessLever();
this.swapNexus = config.swapNexus();
this.getGamesNecklace = config.getGamesNecklace();
this.getGamesNecklaceMode = config.getGamesNecklaceMode();
this.getDuelingRing = config.getDuelingRing();
this.getDuelingRingMode = config.getDuelingRingMode();
this.getGlory = config.getGlory();
this.getGloryMode = config.getGloryMode();
this.getSkillsNecklace = config.getSkillsNecklace();
this.getSkillsNecklaceMode = config.getSkillsNecklaceMode();
this.getNecklaceofPassage = config.getNecklaceofPassage();
this.getNecklaceofPassageMode = config.getNecklaceofPassageMode();
this.getDigsitePendant = config.getDigsitePendant();
this.getDigsitePendantMode = config.getDigsitePendantMode();
this.getCombatBracelet = config.getCombatBracelet();
this.getCombatBraceletMode = config.getCombatBraceletMode();
this.getBurningAmulet = config.getBurningAmulet();
this.getBurningAmuletMode = config.getBurningAmuletMode();
this.getXericsTalisman = config.getXericsTalisman();
this.getXericsTalismanMode = config.getXericsTalismanMode();
this.getRingofWealth = config.getRingofWealth();
this.getRingofWealthMode = config.getRingofWealthMode();
this.getSlayerRing = config.getSlayerRing();
this.getSlayerRingMode = config.getSlayerRingMode();
this.hideExamine = config.hideExamine();
this.hideTradeWith = config.hideTradeWith();
this.hideReport = config.hideReport();
this.hideLookup = config.hideLookup();
this.hideNet = config.hideNet();
this.hideBait = config.hideBait();
this.hideDestroyRunepouch = config.hideDestroyRunepouch();
this.hideDestroyCoalbag = config.hideDestroyCoalbag();
this.hideDestroyHerbsack = config.hideDestroyHerbsack();
this.hideDestroyBoltpouch = config.hideDestroyBoltpouch();
this.hideDestroyGembag = config.hideDestroyGembag();
this.hideDropRunecraftingPouch = config.hideDropRunecraftingPouch();
this.hideCastToB = config.hideCastToB();
this.hideCastIgnoredToB = Sets.newHashSet(Text.fromCSV(config.hideCastIgnoredToB().toLowerCase()));
this.hideCastCoX = config.hideCastCoX();
this.hideCastIgnoredCoX = Sets.newHashSet(Text.fromCSV(config.hideCastIgnoredCoX().toLowerCase()));
}
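// updateConfig snapshots every config accessor into plain fields once per
// change, presumably so the per-menu-entry hot paths above read cheap
// fields instead of querying the config on every event.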
} | 1 | 15,489 | aren't these the wrong way around? | open-osrs-runelite | java |
@@ -2027,6 +2027,10 @@ public class CoreContainer {
return configSetsHandler;
}
+ public ConfigSetService getCoreConfigService() {
+ return this.coreConfigService;
+ }
+
public String getHostName() {
return this.hostName;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core;
import java.io.Closeable;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.spec.InvalidKeySpecException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.auth.AuthSchemeProvider;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.config.Lookup;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.solr.api.ContainerPluginsRegistry;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.AuthSchemeRegistryProvider;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.CredentialsProviderProvider;
import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
import org.apache.solr.cloud.CloudDescriptor;
import org.apache.solr.cloud.ClusterSingleton;
import org.apache.solr.cloud.OverseerTaskQueue;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cluster.events.ClusterEventProducer;
import org.apache.solr.cluster.events.impl.ClusterEventProducerFactory;
import org.apache.solr.cluster.placement.PlacementPluginConfig;
import org.apache.solr.cluster.placement.PlacementPluginFactory;
import org.apache.solr.cluster.placement.impl.DelegatingPlacementPluginFactory;
import org.apache.solr.cluster.placement.impl.PlacementPluginFactoryLoader;
import org.apache.solr.common.AlreadyClosedException;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Replica.State;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.ObjectCache;
import org.apache.solr.common.util.SolrNamedThreadFactory;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.core.backup.repository.BackupRepository;
import org.apache.solr.core.backup.repository.BackupRepositoryFactory;
import org.apache.solr.filestore.PackageStoreAPI;
import org.apache.solr.handler.ClusterAPI;
import org.apache.solr.handler.CollectionsAPI;
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.handler.SnapShooter;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.handler.admin.ConfigSetsHandler;
import org.apache.solr.handler.admin.ContainerPluginsApi;
import org.apache.solr.handler.admin.CoreAdminHandler;
import org.apache.solr.handler.admin.HealthCheckHandler;
import org.apache.solr.handler.admin.InfoHandler;
import org.apache.solr.handler.admin.MetricsCollectorHandler;
import org.apache.solr.handler.admin.MetricsHandler;
import org.apache.solr.handler.admin.MetricsHistoryHandler;
import org.apache.solr.handler.admin.SecurityConfHandler;
import org.apache.solr.handler.admin.SecurityConfHandlerLocal;
import org.apache.solr.handler.admin.SecurityConfHandlerZk;
import org.apache.solr.handler.admin.ZookeeperInfoHandler;
import org.apache.solr.handler.admin.ZookeeperReadAPI;
import org.apache.solr.handler.admin.ZookeeperStatusHandler;
import org.apache.solr.handler.component.ShardHandlerFactory;
import org.apache.solr.handler.sql.CalciteSolrDriver;
import org.apache.solr.logging.LogWatcher;
import org.apache.solr.logging.MDCLoggingContext;
import org.apache.solr.metrics.SolrCoreMetricManager;
import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.metrics.SolrMetricProducer;
import org.apache.solr.metrics.SolrMetricsContext;
import org.apache.solr.pkg.PackageLoader;
import org.apache.solr.request.SolrRequestHandler;
import org.apache.solr.request.SolrRequestInfo;
import org.apache.solr.search.SolrFieldCacheBean;
import org.apache.solr.security.AuditLoggerPlugin;
import org.apache.solr.security.AuthenticationPlugin;
import org.apache.solr.security.AuthorizationPlugin;
import org.apache.solr.security.HttpClientBuilderPlugin;
import org.apache.solr.security.PKIAuthenticationPlugin;
import org.apache.solr.security.PublicKeyHandler;
import org.apache.solr.security.SecurityPluginHolder;
import org.apache.solr.update.SolrCoreState;
import org.apache.solr.update.UpdateShardHandler;
import org.apache.solr.util.OrderedExecutor;
import org.apache.solr.util.RefCounted;
import org.apache.solr.util.stats.MetricUtils;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.util.Objects.requireNonNull;
import static org.apache.solr.common.params.CommonParams.AUTHC_PATH;
import static org.apache.solr.common.params.CommonParams.AUTHZ_PATH;
import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.CONFIGSETS_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.CORES_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.INFO_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.METRICS_HISTORY_PATH;
import static org.apache.solr.common.params.CommonParams.METRICS_PATH;
import static org.apache.solr.common.params.CommonParams.ZK_PATH;
import static org.apache.solr.common.params.CommonParams.ZK_STATUS_PATH;
import static org.apache.solr.core.CorePropertiesLocator.PROPERTIES_FILENAME;
import static org.apache.solr.security.AuthenticationPlugin.AUTHENTICATION_PLUGIN_PROP;
/**
* @since solr 1.3
*/
public class CoreContainer {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
final SolrCores solrCores = new SolrCores(this);
public static class CoreLoadFailure {
public final CoreDescriptor cd;
public final Exception exception;
public CoreLoadFailure(CoreDescriptor cd, Exception loadFailure) {
this.cd = new CoreDescriptor(cd.getName(), cd);
this.exception = loadFailure;
}
}
private volatile PluginBag<SolrRequestHandler> containerHandlers = new PluginBag<>(SolrRequestHandler.class, null);
/**
* Minimize exposure to CoreContainer. Mostly only ZK interface is required
*/
public final Supplier<SolrZkClient> zkClientSupplier = () -> getZkController().getZkClient();
private final ContainerPluginsRegistry containerPluginsRegistry = new ContainerPluginsRegistry(this, containerHandlers.getApiBag());
protected final Map<String, CoreLoadFailure> coreInitFailures = new ConcurrentHashMap<>();
protected volatile CoreAdminHandler coreAdminHandler = null;
protected volatile CollectionsHandler collectionsHandler = null;
protected volatile HealthCheckHandler healthCheckHandler = null;
private volatile InfoHandler infoHandler;
protected volatile ConfigSetsHandler configSetsHandler = null;
private volatile PKIAuthenticationPlugin pkiAuthenticationPlugin;
protected volatile Properties containerProperties;
private volatile ConfigSetService coreConfigService;
protected final ZkContainer zkSys = new ZkContainer();
protected volatile ShardHandlerFactory shardHandlerFactory;
private volatile UpdateShardHandler updateShardHandler;
private volatile ExecutorService coreContainerWorkExecutor = ExecutorUtil.newMDCAwareCachedThreadPool(
new SolrNamedThreadFactory("coreContainerWorkExecutor"));
private final OrderedExecutor replayUpdatesExecutor;
@SuppressWarnings({"rawtypes"})
protected volatile LogWatcher logging = null;
private volatile CloserThread backgroundCloser = null;
protected final NodeConfig cfg;
protected final SolrResourceLoader loader;
protected final Path solrHome;
protected final CoresLocator coresLocator;
private volatile String hostName;
private final BlobRepository blobRepository = new BlobRepository(this);
private volatile boolean asyncSolrCoreLoad;
protected volatile SecurityConfHandler securityConfHandler;
private volatile SecurityPluginHolder<AuthorizationPlugin> authorizationPlugin;
private volatile SecurityPluginHolder<AuthenticationPlugin> authenticationPlugin;
private volatile SecurityPluginHolder<AuditLoggerPlugin> auditloggerPlugin;
private volatile BackupRepositoryFactory backupRepoFactory;
protected volatile SolrMetricManager metricManager;
protected volatile String metricTag = SolrMetricProducer.getUniqueMetricTag(this, null);
protected volatile SolrMetricsContext solrMetricsContext;
protected MetricsHandler metricsHandler;
protected volatile MetricsHistoryHandler metricsHistoryHandler;
protected volatile MetricsCollectorHandler metricsCollectorHandler;
private volatile SolrClientCache solrClientCache;
private final ObjectCache objectCache = new ObjectCache();
private final ClusterSingletons clusterSingletons = new ClusterSingletons(
() -> getZkController() != null &&
getZkController().getOverseer() != null &&
!getZkController().getOverseer().isClosed(),
(r) -> this.runAsync(r));
private volatile ClusterEventProducer clusterEventProducer;
private final DelegatingPlacementPluginFactory placementPluginFactory = new DelegatingPlacementPluginFactory();
private PackageStoreAPI packageStoreAPI;
private PackageLoader packageLoader;
private Set<Path> allowPaths;
// Bits for the state variable.
public final static long LOAD_COMPLETE = 0x1L;
public final static long CORE_DISCOVERY_COMPLETE = 0x2L;
public final static long INITIAL_CORE_LOAD_COMPLETE = 0x4L;
private volatile long status = 0L;
private ExecutorService coreContainerAsyncTaskExecutor = ExecutorUtil.newMDCAwareCachedThreadPool("Core Container Async Task");
private enum CoreInitFailedAction {fromleader, none}
/**
* This method instantiates a new instance of {@linkplain BackupRepository}.
*
* @param repositoryName The name of the backup repository (Optional).
* If not specified, a default implementation is used.
* @return a new instance of {@linkplain BackupRepository}.
*/
public BackupRepository newBackupRepository(String repositoryName) {
BackupRepository repository;
if (repositoryName != null) {
repository = backupRepoFactory.newInstance(getResourceLoader(), repositoryName);
} else {
repository = backupRepoFactory.newInstance(getResourceLoader());
}
return repository;
}
public ExecutorService getCoreZkRegisterExecutorService() {
return zkSys.getCoreZkRegisterExecutorService();
}
public SolrRequestHandler getRequestHandler(String path) {
return RequestHandlerBase.getRequestHandler(path, containerHandlers);
}
public PluginBag<SolrRequestHandler> getRequestHandlers() {
return this.containerHandlers;
}
{
if (log.isDebugEnabled()) {
log.debug("New CoreContainer {}", System.identityHashCode(this));
}
}
/**
* Create a new CoreContainer using the given solr home directory. The container's
* cores are not loaded.
*
* @param solrHome a String containing the path to the solr home directory
* @param properties substitutable properties (alternative to Sys props)
* @see #load()
*/
public CoreContainer(Path solrHome, Properties properties) {
this(SolrXmlConfig.fromSolrHome(solrHome, properties));
}
/**
* Create a new CoreContainer using the given SolrResourceLoader,
* configuration and CoresLocator. The container's cores are
* not loaded.
*
* @param config a ConfigSolr representation of this container's configuration
* @see #load()
*/
public CoreContainer(NodeConfig config) {
this(config, new CorePropertiesLocator(config.getCoreRootDirectory()));
}
public CoreContainer(NodeConfig config, boolean asyncSolrCoreLoad) {
this(config, new CorePropertiesLocator(config.getCoreRootDirectory()), asyncSolrCoreLoad);
}
public CoreContainer(NodeConfig config, CoresLocator locator) {
this(config, locator, false);
}
public CoreContainer(NodeConfig config, CoresLocator locator, boolean asyncSolrCoreLoad) {
this.loader = config.getSolrResourceLoader();
this.solrHome = config.getSolrHome();
this.cfg = requireNonNull(config);
try {
containerHandlers.put(PublicKeyHandler.PATH, new PublicKeyHandler(cfg.getCloudConfig()));
} catch (IOException | InvalidKeySpecException e) {
throw new RuntimeException("Bad PublicKeyHandler configuration.", e);
}
if (null != this.cfg.getBooleanQueryMaxClauseCount()) {
IndexSearcher.setMaxClauseCount(this.cfg.getBooleanQueryMaxClauseCount());
}
this.coresLocator = locator;
this.containerProperties = new Properties(config.getSolrProperties());
this.asyncSolrCoreLoad = asyncSolrCoreLoad;
this.replayUpdatesExecutor = new OrderedExecutor(
cfg.getReplayUpdatesThreads(),
ExecutorUtil.newMDCAwareCachedThreadPool(
cfg.getReplayUpdatesThreads(),
new SolrNamedThreadFactory("replayUpdatesExecutor")));
this.allowPaths = new java.util.HashSet<>();
this.allowPaths.add(cfg.getSolrHome());
this.allowPaths.add(cfg.getCoreRootDirectory());
if (cfg.getSolrDataHome() != null) {
this.allowPaths.add(cfg.getSolrDataHome());
}
if (!cfg.getAllowPaths().isEmpty()) {
this.allowPaths.addAll(cfg.getAllowPaths());
if (log.isInfoEnabled()) {
log.info("Allowing use of paths: {}", cfg.getAllowPaths());
}
}
Path userFilesPath = getUserFilesPath(); // TODO make configurable on cfg?
try {
Files.createDirectories(userFilesPath); // does nothing if already exists
} catch (Exception e) {
log.warn("Unable to create [{}]. Features requiring this directory may fail.", userFilesPath, e);
}
}
@SuppressWarnings({"unchecked"})
private synchronized void initializeAuthorizationPlugin(Map<String, Object> authorizationConf) {
authorizationConf = Utils.getDeepCopy(authorizationConf, 4);
int newVersion = readVersion(authorizationConf);
//Initialize the Authorization module
SecurityPluginHolder<AuthorizationPlugin> old = authorizationPlugin;
SecurityPluginHolder<AuthorizationPlugin> authorizationPlugin = null;
if (authorizationConf != null) {
String klas = (String) authorizationConf.get("class");
if (klas == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "class is required for authorization plugin");
}
if (old != null && old.getZnodeVersion() == newVersion && newVersion > 0) {
log.debug("Authorization config not modified");
return;
}
log.info("Initializing authorization plugin: {}", klas);
authorizationPlugin = new SecurityPluginHolder<>(newVersion,
getResourceLoader().newInstance(klas, AuthorizationPlugin.class));
// Read and pass the authorization context to the plugin
authorizationPlugin.plugin.init(authorizationConf);
} else {
log.debug("Security conf doesn't exist. Skipping setup for authorization module.");
}
this.authorizationPlugin = authorizationPlugin;
if (old != null) {
try {
old.plugin.close();
} catch (Exception e) {
log.error("Exception while attempting to close old authorization plugin", e);
}
}
}
@SuppressWarnings({"unchecked"})
private void initializeAuditloggerPlugin(Map<String, Object> auditConf) {
auditConf = Utils.getDeepCopy(auditConf, 4);
int newVersion = readVersion(auditConf);
//Initialize the Auditlog module
SecurityPluginHolder<AuditLoggerPlugin> old = auditloggerPlugin;
SecurityPluginHolder<AuditLoggerPlugin> newAuditloggerPlugin = null;
if (auditConf != null) {
String klas = (String) auditConf.get("class");
if (klas == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "class is required for auditlogger plugin");
}
if (old != null && old.getZnodeVersion() == newVersion && newVersion > 0) {
log.debug("Auditlogger config not modified");
return;
}
log.info("Initializing auditlogger plugin: {}", klas);
newAuditloggerPlugin = new SecurityPluginHolder<>(newVersion,
getResourceLoader().newInstance(klas, AuditLoggerPlugin.class));
newAuditloggerPlugin.plugin.init(auditConf);
newAuditloggerPlugin.plugin.initializeMetrics(solrMetricsContext, "/auditlogging");
} else {
log.debug("Security conf doesn't exist. Skipping setup for audit logging module.");
}
this.auditloggerPlugin = newAuditloggerPlugin;
if (old != null) {
try {
old.plugin.close();
} catch (Exception e) {
log.error("Exception while attempting to close old auditlogger plugin", e);
}
}
}
@SuppressWarnings({"unchecked", "rawtypes"})
private synchronized void initializeAuthenticationPlugin(Map<String, Object> authenticationConfig) {
authenticationConfig = Utils.getDeepCopy(authenticationConfig, 4);
int newVersion = readVersion(authenticationConfig);
String pluginClassName = null;
if (authenticationConfig != null) {
if (authenticationConfig.containsKey("class")) {
pluginClassName = String.valueOf(authenticationConfig.get("class"));
} else {
throw new SolrException(ErrorCode.SERVER_ERROR, "No 'class' specified for authentication in ZK.");
}
}
if (pluginClassName != null) {
log.debug("Authentication plugin class obtained from security.json: {}", pluginClassName);
} else if (System.getProperty(AUTHENTICATION_PLUGIN_PROP) != null) {
pluginClassName = System.getProperty(AUTHENTICATION_PLUGIN_PROP);
log.debug("Authentication plugin class obtained from system property '{}': {}"
, AUTHENTICATION_PLUGIN_PROP, pluginClassName);
} else {
log.debug("No authentication plugin used.");
}
SecurityPluginHolder<AuthenticationPlugin> old = authenticationPlugin;
SecurityPluginHolder<AuthenticationPlugin> authenticationPlugin = null;
if (old != null && old.getZnodeVersion() == newVersion && newVersion > 0) {
log.debug("Authentication config not modified");
return;
}
// Initialize the plugin
if (pluginClassName != null) {
log.info("Initializing authentication plugin: {}", pluginClassName);
authenticationPlugin = new SecurityPluginHolder<>(newVersion,
getResourceLoader().newInstance(pluginClassName,
AuthenticationPlugin.class,
null,
new Class[]{CoreContainer.class},
new Object[]{this}));
}
if (authenticationPlugin != null) {
authenticationPlugin.plugin.init(authenticationConfig);
setupHttpClientForAuthPlugin(authenticationPlugin.plugin);
authenticationPlugin.plugin.initializeMetrics(solrMetricsContext, "/authentication");
}
this.authenticationPlugin = authenticationPlugin;
try {
if (old != null) old.plugin.close();
} catch (Exception e) {
log.error("Exception while attempting to close old authentication plugin", e);
}
}
private void setupHttpClientForAuthPlugin(Object authcPlugin) {
if (authcPlugin instanceof HttpClientBuilderPlugin) {
// Setup HttpClient for internode communication
HttpClientBuilderPlugin builderPlugin = ((HttpClientBuilderPlugin) authcPlugin);
SolrHttpClientBuilder builder = builderPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
shardHandlerFactory.setSecurityBuilder(builderPlugin);
updateShardHandler.setSecurityBuilder(builderPlugin);
// The default http client of the core container's shardHandlerFactory has already been created and
// configured using the default httpclient configurer. We need to reconfigure it using the plugin's
// http client configurer to set it up for internode communication.
log.debug("Reconfiguring HttpClient settings.");
SolrHttpClientContextBuilder httpClientBuilder = new SolrHttpClientContextBuilder();
if (builder.getCredentialsProviderProvider() != null) {
httpClientBuilder.setDefaultCredentialsProvider(new CredentialsProviderProvider() {
@Override
public CredentialsProvider getCredentialsProvider() {
return builder.getCredentialsProviderProvider().getCredentialsProvider();
}
});
}
if (builder.getAuthSchemeRegistryProvider() != null) {
httpClientBuilder.setAuthSchemeRegistryProvider(new AuthSchemeRegistryProvider() {
@Override
public Lookup<AuthSchemeProvider> getAuthSchemeRegistry() {
return builder.getAuthSchemeRegistryProvider().getAuthSchemeRegistry();
}
});
}
HttpClientUtil.setHttpClientRequestContextBuilder(httpClientBuilder);
}
// Always register PKI auth interceptor, which will then delegate the decision of who should secure
// each request to the configured authentication plugin.
if (pkiAuthenticationPlugin != null && !pkiAuthenticationPlugin.isInterceptorRegistered()) {
pkiAuthenticationPlugin.getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
shardHandlerFactory.setSecurityBuilder(pkiAuthenticationPlugin);
updateShardHandler.setSecurityBuilder(pkiAuthenticationPlugin);
}
}
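  /**
   * Reads the config version from a security config map. A sketch of the expected shape
   * (payload keys are illustrative; only the {@code ""} meta entry is consulted):
   * <pre>{@code
   * // {"authentication": {...}, "": {"v": 3}}  ->  readVersion(conf) returns 3
   * // null conf, or no "" meta entry           ->  returns -1
   * }</pre>
   */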
@SuppressWarnings({"rawtypes"})
private static int readVersion(Map<String, Object> conf) {
if (conf == null) return -1;
Map meta = (Map) conf.get("");
if (meta == null) return -1;
Number v = (Number) meta.get("v");
return v == null ? -1 : v.intValue();
}
/**
* This method allows subclasses to construct a CoreContainer
* without any default init behavior.
*
* @param testConstructor pass (Object)null.
* @lucene.experimental
*/
protected CoreContainer(Object testConstructor) {
solrHome = null;
loader = null;
coresLocator = null;
cfg = null;
containerProperties = null;
replayUpdatesExecutor = null;
}
public static CoreContainer createAndLoad(Path solrHome) {
return createAndLoad(solrHome, solrHome.resolve(SolrXmlConfig.SOLR_XML_FILE));
}
/**
* Create a new CoreContainer and load its cores
*
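   * <p>A minimal sketch (paths are illustrative):</p>
   * <pre>{@code
   * CoreContainer cc = CoreContainer.createAndLoad(
   *     Paths.get("/var/solr"), Paths.get("/var/solr/solr.xml"));
   * try {
   *   // use cc ...
   * } finally {
   *   cc.shutdown();
   * }
   * }</pre>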
* @param solrHome the solr home directory
* @param configFile the file containing this container's configuration
* @return a loaded CoreContainer
*/
public static CoreContainer createAndLoad(Path solrHome, Path configFile) {
CoreContainer cc = new CoreContainer(SolrXmlConfig.fromFile(solrHome, configFile, new Properties()));
try {
cc.load();
} catch (Exception e) {
cc.shutdown();
throw e;
}
return cc;
}
public Properties getContainerProperties() {
return containerProperties;
}
public PKIAuthenticationPlugin getPkiAuthenticationPlugin() {
return pkiAuthenticationPlugin;
}
public SolrMetricManager getMetricManager() {
return metricManager;
}
public MetricsHandler getMetricsHandler() {
return metricsHandler;
}
public MetricsHistoryHandler getMetricsHistoryHandler() {
return metricsHistoryHandler;
}
public OrderedExecutor getReplayUpdatesExecutor() {
return replayUpdatesExecutor;
}
public PackageLoader getPackageLoader() {
return packageLoader;
}
public PackageStoreAPI getPackageStoreAPI() {
return packageStoreAPI;
}
public SolrClientCache getSolrClientCache() {
return solrClientCache;
}
public ObjectCache getObjectCache() {
return objectCache;
}
//-------------------------------------------------------------------
// Initialization / Cleanup
//-------------------------------------------------------------------
/**
* Load the cores defined for this CoreContainer
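   * <p>Typically invoked once, right after construction; a sketch:</p>
   * <pre>{@code
   * CoreContainer cc = new CoreContainer(SolrXmlConfig.fromSolrHome(solrHome, new Properties()));
   * cc.load();
   * }</pre>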
*/
public void load() {
if (log.isDebugEnabled()) {
log.debug("Loading cores into CoreContainer [instanceDir={}]", getSolrHome());
}
// Always add $SOLR_HOME/lib to the shared resource loader
Set<String> libDirs = new LinkedHashSet<>();
libDirs.add("lib");
if (!StringUtils.isBlank(cfg.getSharedLibDirectory())) {
List<String> sharedLibs = Arrays.asList(cfg.getSharedLibDirectory().split("\\s*,\\s*"));
libDirs.addAll(sharedLibs);
}
boolean modified = false;
// add the sharedLib to the shared resource loader before initializing cfg based plugins
for (String libDir : libDirs) {
Path libPath = Paths.get(getSolrHome()).resolve(libDir);
if (Files.exists(libPath)) {
try {
loader.addToClassLoader(SolrResourceLoader.getURLs(libPath));
modified = true;
} catch (IOException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Couldn't load libs: " + e, e);
}
}
}
if (modified) {
loader.reloadLuceneSPI();
}
ClusterEventProducerFactory clusterEventProducerFactory = new ClusterEventProducerFactory(this);
clusterEventProducer = clusterEventProducerFactory;
containerPluginsRegistry.registerListener(clusterSingletons.getPluginRegistryListener());
containerPluginsRegistry.registerListener(clusterEventProducerFactory.getPluginRegistryListener());
packageStoreAPI = new PackageStoreAPI(this);
containerHandlers.getApiBag().registerObject(packageStoreAPI.readAPI);
containerHandlers.getApiBag().registerObject(packageStoreAPI.writeAPI);
metricManager = new SolrMetricManager(loader, cfg.getMetricsConfig());
String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.node);
solrMetricsContext = new SolrMetricsContext(metricManager, registryName, metricTag);
coreContainerWorkExecutor = MetricUtils.instrumentedExecutorService(
coreContainerWorkExecutor, null,
metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
if (shardHandlerFactory instanceof SolrMetricProducer) {
SolrMetricProducer metricProducer = (SolrMetricProducer) shardHandlerFactory;
metricProducer.initializeMetrics(solrMetricsContext, "httpShardHandler");
}
updateShardHandler = new UpdateShardHandler(cfg.getUpdateShardHandlerConfig());
updateShardHandler.initializeMetrics(solrMetricsContext, "updateShardHandler");
solrClientCache = new SolrClientCache(updateShardHandler.getDefaultHttpClient());
// initialize CalciteSolrDriver instance to use this solrClientCache
CalciteSolrDriver.INSTANCE.setSolrClientCache(solrClientCache);
solrCores.load(loader);
logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);
hostName = cfg.getNodeName();
zkSys.initZooKeeper(this, cfg.getCloudConfig());
if (isZooKeeperAware()) {
pkiAuthenticationPlugin = new PKIAuthenticationPlugin(this, zkSys.getZkController().getNodeName(),
(PublicKeyHandler) containerHandlers.get(PublicKeyHandler.PATH));
// use deprecated API for back-compat, remove in 9.0
pkiAuthenticationPlugin.initializeMetrics(solrMetricsContext, "/authentication/pki");
TracerConfigurator.loadTracer(loader, cfg.getTracerConfiguratorPluginInfo(), getZkController().getZkStateReader());
packageLoader = new PackageLoader(this);
containerHandlers.getApiBag().registerObject(packageLoader.getPackageAPI().editAPI);
containerHandlers.getApiBag().registerObject(packageLoader.getPackageAPI().readAPI);
ZookeeperReadAPI zookeeperReadAPI = new ZookeeperReadAPI(this);
containerHandlers.getApiBag().registerObject(zookeeperReadAPI);
}
MDCLoggingContext.setNode(this);
securityConfHandler = isZooKeeperAware() ? new SecurityConfHandlerZk(this) : new SecurityConfHandlerLocal(this);
reloadSecurityProperties();
warnUsersOfInsecureSettings();
this.backupRepoFactory = new BackupRepositoryFactory(cfg.getBackupRepositoryPlugins());
createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
createHandler(ZK_STATUS_PATH, ZookeeperStatusHandler.class.getName(), ZookeeperStatusHandler.class);
collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
final CollectionsAPI collectionsAPI = new CollectionsAPI(collectionsHandler);
containerHandlers.getApiBag().registerObject(collectionsAPI);
containerHandlers.getApiBag().registerObject(collectionsAPI.collectionsCommands);
configSetsHandler = createHandler(CONFIGSETS_HANDLER_PATH, cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
ClusterAPI clusterAPI = new ClusterAPI(collectionsHandler, configSetsHandler);
containerHandlers.getApiBag().registerObject(clusterAPI);
containerHandlers.getApiBag().registerObject(clusterAPI.commands);
containerHandlers.getApiBag().registerObject(clusterAPI.configSetCommands);
/*
     * HealthCheckHandler needs to be initialized before InfoHandler, since the latter will call CoreContainer.getHealthCheckHandler().
* We don't register the handler here because it'll be registered inside InfoHandler
*/
healthCheckHandler = loader.newInstance(cfg.getHealthCheckHandlerClass(), HealthCheckHandler.class, null, new Class<?>[]{CoreContainer.class}, new Object[]{this});
infoHandler = createHandler(INFO_HANDLER_PATH, cfg.getInfoHandlerClass(), InfoHandler.class);
coreAdminHandler = createHandler(CORES_HANDLER_PATH, cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
// metricsHistoryHandler uses metricsHandler, so create it first
metricsHandler = new MetricsHandler(this);
containerHandlers.put(METRICS_PATH, metricsHandler);
metricsHandler.initializeMetrics(solrMetricsContext, METRICS_PATH);
createMetricsHistoryHandler();
if (cfg.getMetricsConfig().isEnabled()) {
metricsCollectorHandler = createHandler(MetricsCollectorHandler.HANDLER_PATH, MetricsCollectorHandler.class.getName(), MetricsCollectorHandler.class);
// may want to add some configuration here in the future
metricsCollectorHandler.init(null);
}
containerHandlers.put(AUTHZ_PATH, securityConfHandler);
securityConfHandler.initializeMetrics(solrMetricsContext, AUTHZ_PATH);
containerHandlers.put(AUTHC_PATH, securityConfHandler);
PluginInfo[] metricReporters = cfg.getMetricsConfig().getMetricReporters();
metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.node);
metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.jvm);
metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.jetty);
coreConfigService = ConfigSetService.createConfigSetService(cfg, loader, zkSys.zkController);
containerProperties.putAll(cfg.getSolrProperties());
// initialize gauges for reporting the number of cores and disk total/free
solrMetricsContext.gauge(solrCores::getNumLoadedPermanentCores,
true, "loaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
solrMetricsContext.gauge(solrCores::getNumLoadedTransientCores,
true, "lazy", SolrInfoBean.Category.CONTAINER.toString(), "cores");
solrMetricsContext.gauge(solrCores::getNumUnloadedCores,
true, "unloaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
Path dataHome = cfg.getSolrDataHome() != null ? cfg.getSolrDataHome() : cfg.getCoreRootDirectory();
solrMetricsContext.gauge(() -> dataHome.toFile().getTotalSpace(),
true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
solrMetricsContext.gauge(() -> dataHome.toFile().getUsableSpace(),
true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
solrMetricsContext.gauge(dataHome::toString,
true, "path", SolrInfoBean.Category.CONTAINER.toString(), "fs");
solrMetricsContext.gauge(() -> cfg.getCoreRootDirectory().toFile().getTotalSpace(),
true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
solrMetricsContext.gauge(() -> cfg.getCoreRootDirectory().toFile().getUsableSpace(),
true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
solrMetricsContext.gauge(() -> cfg.getCoreRootDirectory().toString(),
true, "path", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
// add version information
solrMetricsContext.gauge(() -> this.getClass().getPackage().getSpecificationVersion(),
true, "specification", SolrInfoBean.Category.CONTAINER.toString(), "version");
solrMetricsContext.gauge(() -> this.getClass().getPackage().getImplementationVersion(),
true, "implementation", SolrInfoBean.Category.CONTAINER.toString(), "version");
SolrFieldCacheBean fieldCacheBean = new SolrFieldCacheBean();
fieldCacheBean.initializeMetrics(solrMetricsContext, null);
if (isZooKeeperAware()) {
metricManager.loadClusterReporters(metricReporters, this);
}
// setup executor to load cores in parallel
ExecutorService coreLoadExecutor = MetricUtils.instrumentedExecutorService(
ExecutorUtil.newMDCAwareFixedThreadPool(
cfg.getCoreLoadThreadCount(isZooKeeperAware()),
new SolrNamedThreadFactory("coreLoadExecutor")), null,
metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
SolrMetricManager.mkName("coreLoadExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
final List<Future<SolrCore>> futures = new ArrayList<>();
try {
List<CoreDescriptor> cds = coresLocator.discover(this);
cds = CoreSorter.sortCores(this, cds);
checkForDuplicateCoreNames(cds);
status |= CORE_DISCOVERY_COMPLETE;
for (final CoreDescriptor cd : cds) {
if (cd.isTransient() || !cd.isLoadOnStartup()) {
solrCores.addCoreDescriptor(cd);
} else if (asyncSolrCoreLoad) {
solrCores.markCoreAsLoading(cd);
}
if (cd.isLoadOnStartup()) {
futures.add(coreLoadExecutor.submit(() -> {
SolrCore core;
try {
if (zkSys.getZkController() != null) {
zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
}
solrCores.waitAddPendingCoreOps(cd.getName());
core = createFromDescriptor(cd, false, false);
} finally {
solrCores.removeFromPendingOps(cd.getName());
if (asyncSolrCoreLoad) {
solrCores.markCoreAsNotLoading(cd);
}
}
try {
zkSys.registerInZk(core, true, false);
} catch (RuntimeException e) {
SolrException.log(log, "Error registering SolrCore", e);
}
return core;
}));
}
}
// Start the background thread
backgroundCloser = new CloserThread(this, solrCores, cfg);
backgroundCloser.start();
} finally {
      if (asyncSolrCoreLoad) { // futures is a final, non-null local, so only the flag matters
coreContainerWorkExecutor.submit(() -> {
try {
for (Future<SolrCore> future : futures) {
try {
future.get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (ExecutionException e) {
log.error("Error waiting for SolrCore to be loaded on startup", e);
}
}
} finally {
ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
}
});
} else {
ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
}
}
if (isZooKeeperAware()) {
containerPluginsRegistry.refresh();
getZkController().zkStateReader.registerClusterPropertiesListener(containerPluginsRegistry);
ContainerPluginsApi containerPluginsApi = new ContainerPluginsApi(this);
containerHandlers.getApiBag().registerObject(containerPluginsApi.readAPI);
containerHandlers.getApiBag().registerObject(containerPluginsApi.editAPI);
// initialize the placement plugin factory wrapper
// with the plugin configuration from the registry
PlacementPluginFactoryLoader.load(placementPluginFactory, containerPluginsRegistry);
// create target ClusterEventProducer (possibly from plugins)
clusterEventProducer = clusterEventProducerFactory.create(containerPluginsRegistry);
// init ClusterSingleton-s
// register the handlers that are also ClusterSingleton
containerHandlers.keySet().forEach(handlerName -> {
SolrRequestHandler handler = containerHandlers.get(handlerName);
if (handler instanceof ClusterSingleton) {
ClusterSingleton singleton = (ClusterSingleton) handler;
clusterSingletons.getSingletons().put(singleton.getName(), singleton);
}
});
clusterSingletons.setReady();
zkSys.getZkController().checkOverseerDesignate();
}
    // This is a bit redundant, but these are two distinct concepts even though they're accomplished at the same time.
status |= LOAD_COMPLETE | INITIAL_CORE_LOAD_COMPLETE;
}
// MetricsHistoryHandler supports both cloud and standalone configs
@SuppressWarnings({"unchecked"})
private void createMetricsHistoryHandler() {
PluginInfo plugin = cfg.getMetricsConfig().getHistoryHandler();
if (plugin != null && MetricsConfig.NOOP_IMPL_CLASS.equals(plugin.className)) {
// still create the handler but it will be disabled
plugin = null;
}
Map<String, Object> initArgs;
if (plugin != null && plugin.initArgs != null) {
initArgs = plugin.initArgs.asMap(5);
initArgs.putIfAbsent(MetricsHistoryHandler.ENABLE_PROP, plugin.isEnabled());
} else {
initArgs = new HashMap<>();
}
String name;
SolrCloudManager cloudManager;
SolrClient client;
if (isZooKeeperAware()) {
name = getZkController().getNodeName();
cloudManager = getZkController().getSolrCloudManager();
client = new CloudSolrClient.Builder(Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
.withSocketTimeout(30000).withConnectionTimeout(15000)
.withHttpClient(updateShardHandler.getDefaultHttpClient()).build();
} else {
name = getNodeConfig().getNodeName();
if (name == null || name.isEmpty()) {
name = "localhost";
}
cloudManager = null;
client = new EmbeddedSolrServer(this, null);
// enable local metrics unless specifically set otherwise
initArgs.putIfAbsent(MetricsHistoryHandler.ENABLE_NODES_PROP, true);
initArgs.putIfAbsent(MetricsHistoryHandler.ENABLE_REPLICAS_PROP, true);
}
metricsHistoryHandler = new MetricsHistoryHandler(name, metricsHandler,
client, cloudManager, initArgs);
containerHandlers.put(METRICS_HISTORY_PATH, metricsHistoryHandler);
metricsHistoryHandler.initializeMetrics(solrMetricsContext, METRICS_HISTORY_PATH);
}
public void securityNodeChanged() {
log.info("Security node changed, reloading security.json");
reloadSecurityProperties();
}
/**
* Make sure securityConfHandler is initialized
*/
@SuppressWarnings({"unchecked"})
private void reloadSecurityProperties() {
SecurityConfHandler.SecurityConfig securityConfig = securityConfHandler.getSecurityConfig(false);
initializeAuthorizationPlugin((Map<String, Object>) securityConfig.getData().get("authorization"));
initializeAuthenticationPlugin((Map<String, Object>) securityConfig.getData().get("authentication"));
initializeAuditloggerPlugin((Map<String, Object>) securityConfig.getData().get("auditlogging"));
}
private void warnUsersOfInsecureSettings() {
if (authenticationPlugin == null || authorizationPlugin == null) {
log.warn("Not all security plugins configured! authentication={} authorization={}. Solr is only as secure as " +
"you make it. Consider configuring authentication/authorization before exposing Solr to users internal or " +
"external. See https://s.apache.org/solrsecurity for more info",
(authenticationPlugin != null) ? "enabled" : "disabled",
(authorizationPlugin != null) ? "enabled" : "disabled");
}
if (authenticationPlugin != null && StringUtils.isEmpty(System.getProperty("solr.jetty.https.port"))) {
log.warn("Solr authentication is enabled, but SSL is off. Consider enabling SSL to protect user credentials and data with encryption.");
}
}
private static void checkForDuplicateCoreNames(List<CoreDescriptor> cds) {
Map<String, Path> addedCores = Maps.newHashMap();
for (CoreDescriptor cd : cds) {
final String name = cd.getName();
if (addedCores.containsKey(name))
throw new SolrException(ErrorCode.SERVER_ERROR,
String.format(Locale.ROOT, "Found multiple cores with the name [%s], with instancedirs [%s] and [%s]",
name, addedCores.get(name), cd.getInstanceDir()));
addedCores.put(name, cd.getInstanceDir());
}
}
private volatile boolean isShutDown = false;
public boolean isShutDown() {
return isShutDown;
}
public void shutdown() {
ZkController zkController = getZkController();
if (zkController != null) {
OverseerTaskQueue overseerCollectionQueue = zkController.getOverseerCollectionQueue();
overseerCollectionQueue.allowOverseerPendingTasksToComplete();
}
if (log.isInfoEnabled()) {
log.info("Shutting down CoreContainer instance={}", System.identityHashCode(this));
}
ExecutorUtil.shutdownAndAwaitTermination(coreContainerAsyncTaskExecutor);
ExecutorService customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrNamedThreadFactory("closeThreadPool"));
isShutDown = true;
try {
if (isZooKeeperAware()) {
cancelCoreRecoveries();
zkSys.zkController.preClose();
/*
* Pause updates for all cores on this node and wait for all in-flight update requests to finish.
* Here, we (slightly) delay leader election so that in-flight update requests succeed and we can preserve consistency.
*
* Jetty already allows a grace period for in-flight requests to complete and our solr cores, searchers etc
* are reference counted to allow for graceful shutdown. So we don't worry about any other kind of requests.
*
* We do not need to unpause ever because the node is being shut down.
*/
getCores().parallelStream().forEach(solrCore -> {
SolrCoreState solrCoreState = solrCore.getSolrCoreState();
try {
solrCoreState.pauseUpdatesAndAwaitInflightRequests();
} catch (TimeoutException e) {
log.warn("Timed out waiting for in-flight update requests to complete for core: {}", solrCore.getName());
} catch (InterruptedException e) {
log.warn("Interrupted while waiting for in-flight update requests to complete for core: {}", solrCore.getName());
Thread.currentThread().interrupt();
}
});
zkSys.zkController.tryCancelAllElections();
}
ExecutorUtil.shutdownAndAwaitTermination(coreContainerWorkExecutor);
// First wake up the closer thread, it'll terminate almost immediately since it checks isShutDown.
synchronized (solrCores.getModifyLock()) {
solrCores.getModifyLock().notifyAll(); // wake up anyone waiting
}
if (backgroundCloser != null) { // Doesn't seem right, but tests get in here without initializing the core.
try {
while (true) {
backgroundCloser.join(15000);
if (backgroundCloser.isAlive()) {
synchronized (solrCores.getModifyLock()) {
solrCores.getModifyLock().notifyAll(); // there is a race we have to protect against
}
} else {
break;
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
if (log.isDebugEnabled()) {
log.debug("backgroundCloser thread was interrupted before finishing");
}
}
}
// Now clear all the cores that are being operated upon.
solrCores.close();
objectCache.clear();
      // It's still possible that one of the pending dynamic load operations is waiting, so wake it up if so.
// Since all the pending operations queues have been drained, there should be nothing to do.
synchronized (solrCores.getModifyLock()) {
solrCores.getModifyLock().notifyAll(); // wake up the thread
}
customThreadPool.submit(() -> {
replayUpdatesExecutor.shutdownAndAwaitTermination();
});
if (metricsHistoryHandler != null) {
metricsHistoryHandler.close();
IOUtils.closeQuietly(metricsHistoryHandler.getSolrClient());
}
if (metricManager != null) {
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node));
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm));
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jetty));
metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node), metricTag);
metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm), metricTag);
metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jetty), metricTag);
}
if (isZooKeeperAware()) {
cancelCoreRecoveries();
if (metricManager != null) {
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.cluster));
}
}
try {
if (coreAdminHandler != null) {
customThreadPool.submit(() -> {
coreAdminHandler.shutdown();
});
}
} catch (Exception e) {
log.warn("Error shutting down CoreAdminHandler. Continuing to close CoreContainer.", e);
}
if (solrClientCache != null) {
solrClientCache.close();
}
if (containerPluginsRegistry != null) {
IOUtils.closeQuietly(containerPluginsRegistry);
}
} finally {
try {
if (shardHandlerFactory != null) {
customThreadPool.submit(() -> {
shardHandlerFactory.close();
});
}
} finally {
try {
if (updateShardHandler != null) {
            customThreadPool.submit(() -> updateShardHandler.close());
}
} finally {
try {
// we want to close zk stuff last
zkSys.close();
} finally {
ExecutorUtil.shutdownAndAwaitTermination(customThreadPool);
}
}
}
}
// It should be safe to close the authorization plugin at this point.
try {
if (authorizationPlugin != null) {
authorizationPlugin.plugin.close();
}
} catch (IOException e) {
log.warn("Exception while closing authorization plugin.", e);
}
// It should be safe to close the authentication plugin at this point.
try {
if (authenticationPlugin != null) {
authenticationPlugin.plugin.close();
authenticationPlugin = null;
}
} catch (Exception e) {
log.warn("Exception while closing authentication plugin.", e);
}
// It should be safe to close the auditlogger plugin at this point.
try {
if (auditloggerPlugin != null) {
auditloggerPlugin.plugin.close();
auditloggerPlugin = null;
}
} catch (Exception e) {
log.warn("Exception while closing auditlogger plugin.", e);
}
if(packageLoader != null){
org.apache.lucene.util.IOUtils.closeWhileHandlingException(packageLoader);
}
org.apache.lucene.util.IOUtils.closeWhileHandlingException(loader); // best effort
}
public void cancelCoreRecoveries() {
List<SolrCore> cores = solrCores.getCores();
// we must cancel without holding the cores sync
// make sure we wait for any recoveries to stop
for (SolrCore core : cores) {
try {
core.getSolrCoreState().cancelRecovery();
} catch (Exception e) {
SolrException.log(log, "Error canceling recovery for core", e);
}
}
}
public CoresLocator getCoresLocator() {
return coresLocator;
}
protected SolrCore registerCore(CoreDescriptor cd, SolrCore core, boolean registerInZk, boolean skipRecovery) {
if (core == null) {
throw new RuntimeException("Can not register a null core.");
}
if (isShutDown) {
core.close();
throw new IllegalStateException("This CoreContainer has been closed");
}
assert core.getName().equals(cd.getName()) : "core name " + core.getName() + " != cd " + cd.getName();
SolrCore old = solrCores.putCore(cd, core);
coreInitFailures.remove(cd.getName());
if (old == null || old == core) {
if (log.isDebugEnabled()) {
log.debug("registering core: {}", cd.getName());
}
if (registerInZk) {
zkSys.registerInZk(core, false, skipRecovery);
}
return null;
} else {
if (log.isDebugEnabled()) {
log.debug("replacing core: {}", cd.getName());
}
old.close();
if (registerInZk) {
zkSys.registerInZk(core, false, skipRecovery);
}
return old;
}
}
/**
* Creates a new core, publishing the core state to the cluster
*
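   * <p>A sketch of creating a core from an existing config set (property keys follow
   * core.properties conventions; the names and values shown are illustrative):</p>
   * <pre>{@code
   * Map<String, String> params = new HashMap<>();
   * params.put("configSet", "_default");
   * SolrCore core = coreContainer.create("mycore", params);
   * }</pre>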
* @param coreName the core name
* @param parameters the core parameters
* @return the newly created core
*/
public SolrCore create(String coreName, Map<String, String> parameters) {
return create(coreName, cfg.getCoreRootDirectory().resolve(coreName), parameters, false);
}
final Set<String> inFlightCreations = ConcurrentHashMap.newKeySet(); // See SOLR-14969
/**
* Creates a new core in a specified instance directory, publishing the core state to the cluster
*
* @param coreName the core name
* @param instancePath the instance directory
* @param parameters the core parameters
* @return the newly created core
*/
public SolrCore create(String coreName, Path instancePath, Map<String, String> parameters, boolean newCollection) {
boolean iAdded = false;
try {
iAdded = inFlightCreations.add(coreName);
if (! iAdded) {
String msg = "Already creating a core with name '" + coreName + "', call aborted '";
log.warn(msg);
throw new SolrException(ErrorCode.CONFLICT, msg);
}
CoreDescriptor cd = new CoreDescriptor(coreName, instancePath, parameters, getContainerProperties(), getZkController());
// Since the core descriptor is removed when a core is unloaded, it should never be anywhere when a core is created.
if (getCoreDescriptor(coreName) != null) {
log.warn("Creating a core with existing name is not allowed: '{}'", coreName);
// TODO: Shouldn't this be a BAD_REQUEST?
throw new SolrException(ErrorCode.SERVER_ERROR, "Core with name '" + coreName + "' already exists.");
}
// Validate paths are relative to known locations to avoid path traversal
assertPathAllowed(cd.getInstanceDir());
assertPathAllowed(Paths.get(cd.getDataDir()));
      boolean preExistingZkEntry = false;
try {
if (getZkController() != null) {
if (cd.getCloudDescriptor().getCoreNodeName() == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "coreNodeName missing " + parameters.toString());
}
          preExistingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd);
}
// Much of the logic in core handling pre-supposes that the core.properties file already exists, so create it
// first and clean it up if there's an error.
coresLocator.create(this, cd);
SolrCore core;
try {
solrCores.waitAddPendingCoreOps(cd.getName());
core = createFromDescriptor(cd, true, newCollection);
coresLocator.persist(this, cd); // Write out the current core properties in case anything changed when the core was created
} finally {
solrCores.removeFromPendingOps(cd.getName());
}
return core;
} catch (Exception ex) {
// First clean up any core descriptor, there should never be an existing core.properties file for any core that
// failed to be created on-the-fly.
coresLocator.delete(this, cd);
if (isZooKeeperAware() && !preExisitingZkEntry) {
try {
getZkController().unregister(coreName, cd);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
SolrException.log(log, null, e);
} catch (KeeperException e) {
SolrException.log(log, null, e);
} catch (Exception e) {
SolrException.log(log, null, e);
}
}
Throwable tc = ex;
Throwable c = null;
do {
tc = tc.getCause();
if (tc != null) {
c = tc;
}
} while (tc != null);
String rootMsg = "";
if (c != null) {
rootMsg = " Caused by: " + c.getMessage();
}
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Error CREATEing SolrCore '" + coreName + "': " + ex.getMessage() + rootMsg, ex);
}
} finally {
if (iAdded) {
inFlightCreations.remove(coreName);
}
}
}
/**
* Checks that the given path is relative to SOLR_HOME, SOLR_DATA_HOME, coreRootDirectory or one of the paths
* specified in solr.xml's allowPaths element. Delegates to {@link SolrPaths#assertPathAllowed(Path, Set)}
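   * <p>Example (sketch; passes only if the path falls under one of the allowed roots):</p>
   * <pre>{@code
   * coreContainer.assertPathAllowed(Paths.get("/var/solr/data/mycore"));
   * }</pre>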
* @param pathToAssert path to check
* @throws SolrException if path is outside allowed paths
*/
public void assertPathAllowed(Path pathToAssert) throws SolrException {
SolrPaths.assertPathAllowed(pathToAssert, allowPaths);
}
/**
* <p>Return the file system paths that should be allowed for various API requests.
* This list is compiled at startup from SOLR_HOME, SOLR_DATA_HOME and the
* <code>allowPaths</code> configuration of solr.xml.
* These paths are used by the {@link #assertPathAllowed(Path)} method call.</p>
* <p><b>NOTE:</b></p> This method is currently only in use in tests in order to
* modify the mutable Set directly. Please treat this as a private method.
*/
@VisibleForTesting
public Set<Path> getAllowPaths() {
return allowPaths;
}
/**
* Creates a new core based on a CoreDescriptor.
*
* @param dcore a core descriptor
* @param publishState publish core state to the cluster if true
* <p>
* WARNING: Any call to this method should be surrounded by a try/finally block
* that calls solrCores.waitAddPendingCoreOps(...) and solrCores.removeFromPendingOps(...)
*
* <pre>
* <code>
* try {
* solrCores.waitAddPendingCoreOps(dcore.getName());
* createFromDescriptor(...);
* } finally {
* solrCores.removeFromPendingOps(dcore.getName());
* }
* </code>
* </pre>
* <p>
* Trying to put the waitAddPending... in this method results in Bad Things Happening due to race conditions.
* getCore() depends on getting the core returned _if_ it's in the pending list due to some other thread opening it.
   * If the core is not in the pending list and not loaded, then getCore() calls this method. Anything that checked
   * whether the core was loaded _or_ in pending ops and then, based on the result, called createFromDescriptor would
   * introduce a race condition; see getCore() for the place it would be a problem
* @return the newly created core
*/
@SuppressWarnings("resource")
private SolrCore createFromDescriptor(CoreDescriptor dcore, boolean publishState, boolean newCollection) {
if (isShutDown) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Solr has been shutdown.");
}
SolrCore core = null;
try {
MDCLoggingContext.setCoreDescriptor(this, dcore);
SolrIdentifierValidator.validateCoreName(dcore.getName());
if (zkSys.getZkController() != null) {
zkSys.getZkController().preRegister(dcore, publishState);
}
ConfigSet coreConfig = coreConfigService.loadConfigSet(dcore);
dcore.setConfigSetTrusted(coreConfig.isTrusted());
if (log.isInfoEnabled()) {
log.info("Creating SolrCore '{}' using configuration from {}, trusted={}", dcore.getName(), coreConfig.getName(), dcore.isConfigSetTrusted());
}
try {
core = new SolrCore(this, dcore, coreConfig);
} catch (SolrException e) {
core = processCoreCreateException(e, dcore, coreConfig);
}
// always kick off recovery if we are in non-Cloud mode
if (!isZooKeeperAware() && core.getUpdateHandler().getUpdateLog() != null) {
core.getUpdateHandler().getUpdateLog().recoverFromLog();
}
registerCore(dcore, core, publishState, newCollection);
return core;
} catch (Exception e) {
coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
if (e instanceof ZkController.NotInClusterStateException && !newCollection) {
// this mostly happen when the core is deleted when this node is down
unload(dcore.getName(), true, true, true);
throw e;
}
solrCores.removeCoreDescriptor(dcore);
final SolrException solrException = new SolrException(ErrorCode.SERVER_ERROR, "Unable to create core [" + dcore.getName() + "]", e);
if (core != null && !core.isClosed())
IOUtils.closeQuietly(core);
throw solrException;
} catch (Throwable t) {
SolrException e = new SolrException(ErrorCode.SERVER_ERROR, "JVM Error creating core [" + dcore.getName() + "]: " + t.getMessage(), t);
coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
solrCores.removeCoreDescriptor(dcore);
if (core != null && !core.isClosed())
IOUtils.closeQuietly(core);
throw t;
} finally {
MDCLoggingContext.clear();
}
}
public boolean isSharedFs(CoreDescriptor cd) {
try (SolrCore core = this.getCore(cd.getName())) {
if (core != null) {
return core.getDirectoryFactory().isSharedStorage();
} else {
ConfigSet configSet = coreConfigService.loadConfigSet(cd);
return DirectoryFactory.loadDirectoryFactory(configSet.getSolrConfig(), this, null).isSharedStorage();
}
}
}
/**
* Take action when we failed to create a SolrCore. If error is due to corrupt index, try to recover. Various recovery
* strategies can be specified via system properties "-DCoreInitFailedAction={fromleader, none}"
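   * <p>A sketch of opting into leader recovery (the property name matches the enum's simple name):</p>
   * <pre>{@code
   * // e.g. pass to the JVM at startup:
   * //   -DCoreInitFailedAction=fromleader
   * }</pre>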
*
* @param original the problem seen when loading the core the first time.
* @param dcore core descriptor for the core to create
* @param coreConfig core config for the core to create
   * @return a new core instance if recovery from the leader was possible
* @throws SolrException rethrows the original exception if we will not attempt to recover, throws a new SolrException with the
* original exception as a suppressed exception if there is a second problem creating the solr core.
* @see CoreInitFailedAction
*/
private SolrCore processCoreCreateException(SolrException original, CoreDescriptor dcore, ConfigSet coreConfig) {
// Traverse full chain since CIE may not be root exception
Throwable cause = original;
while ((cause = cause.getCause()) != null) {
if (cause instanceof CorruptIndexException) {
break;
}
}
// If no CorruptIndexException, nothing we can try here
if (cause == null) throw original;
CoreInitFailedAction action = CoreInitFailedAction.valueOf(System.getProperty(CoreInitFailedAction.class.getSimpleName(), "none"));
log.debug("CorruptIndexException while creating core, will attempt to repair via {}", action);
switch (action) {
case fromleader: // Recovery from leader on a CorruptedIndexException
if (isZooKeeperAware()) {
CloudDescriptor desc = dcore.getCloudDescriptor();
try {
Replica leader = getZkController().getClusterState()
.getCollection(desc.getCollectionName())
.getSlice(desc.getShardId())
.getLeader();
if (leader != null && leader.getState() == State.ACTIVE) {
log.info("Found active leader, will attempt to create fresh core and recover.");
resetIndexDirectory(dcore, coreConfig);
// the index of this core is emptied, its term should be set to 0
getZkController().getShardTerms(desc.getCollectionName(), desc.getShardId()).setTermToZero(desc.getCoreNodeName());
return new SolrCore(this, dcore, coreConfig);
}
} catch (SolrException se) {
se.addSuppressed(original);
throw se;
}
}
throw original;
case none:
throw original;
default:
log.warn("Failed to create core, and did not recognize specified 'CoreInitFailedAction': [{}]. Valid options are {}.",
action, Arrays.asList(CoreInitFailedAction.values()));
throw original;
}
}
/**
   * Write a new index directory for a SolrCore, but do so without loading it.
*/
private void resetIndexDirectory(CoreDescriptor dcore, ConfigSet coreConfig) {
SolrConfig config = coreConfig.getSolrConfig();
String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, dcore.getName());
DirectoryFactory df = DirectoryFactory.loadDirectoryFactory(config, this, registryName);
String dataDir = SolrCore.findDataDir(df, null, config, dcore);
String tmpIdxDirName = "index." + new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
SolrCore.modifyIndexProps(df, dataDir, config, tmpIdxDirName);
// Free the directory object that we had to create for this
Directory dir = null;
try {
dir = df.get(dataDir, DirContext.META_DATA, config.indexConfig.lockType);
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
} finally {
try {
df.release(dir);
df.doneWithDirectory(dir);
} catch (IOException e) {
SolrException.log(log, e);
}
}
}
/**
* Gets the permanent (non-transient) cores that are currently loaded.
*
* @return An unsorted list. This list is a new copy, it can be modified by the caller (e.g. it can be sorted).
*/
public List<SolrCore> getCores() {
return solrCores.getCores();
}
/**
* Gets the permanent and transient cores that are currently loaded, i.e. cores that have
* 1: loadOnStartup=true and are either not-transient or, if transient, have been loaded and have not been aged out
* 2: loadOnStartup=false and have been loaded but are either non-transient or have not been aged out.
* <p>
* Put another way, this will not return any names of cores that are lazily loaded but have not been called for yet
* or are transient and either not loaded or have been swapped out.
* <p>
* For efficiency, prefer to check {@link #isLoaded(String)} instead of {@link #getLoadedCoreNames()}.contains(coreName).
*
* @return An unsorted list. This list is a new copy, it can be modified by the caller (e.g. it can be sorted).
*/
public List<String> getLoadedCoreNames() {
return solrCores.getLoadedCoreNames();
}
/**
* Gets a collection of all the cores, permanent and transient, that are currently known, whether they are loaded or not.
* <p>
* For efficiency, prefer to check {@link #getCoreDescriptor(String)} != null instead of {@link #getAllCoreNames()}.contains(coreName).
*
* @return An unsorted list. This list is a new copy, it can be modified by the caller (e.g. it can be sorted).
*/
public List<String> getAllCoreNames() {
return solrCores.getAllCoreNames();
}
/**
* Gets the total number of cores, including permanent and transient cores, loaded and unloaded cores.
* Faster equivalent for {@link #getAllCoreNames()}.size().
*/
public int getNumAllCores() {
return solrCores.getNumAllCores();
}
/**
* Returns an immutable Map of Exceptions that occurred when initializing
   * SolrCores (either at startup, or due to runtime requests to create cores)
* keyed off of the name (String) of the SolrCore that had the Exception
* during initialization.
* <p>
* While the Map returned by this method is immutable and will not change
* once returned to the client, the source data used to generate this Map
* can be changed as various SolrCore operations are performed:
* </p>
* <ul>
* <li>Failed attempts to create new SolrCores will add new Exceptions.</li>
* <li>Failed attempts to re-create a SolrCore using a name already contained in this Map will replace the Exception.</li>
* <li>Failed attempts to reload a SolrCore will cause an Exception to be added to this list -- even though the existing SolrCore with that name will continue to be available.</li>
   * <li>Successful attempts to re-create a SolrCore using a name already contained in this Map will remove the Exception.</li>
* <li>Registering an existing SolrCore with a name already contained in this Map (ie: ALIAS or SWAP) will remove the Exception.</li>
* </ul>
*/
public Map<String, CoreLoadFailure> getCoreInitFailures() {
return ImmutableMap.copyOf(coreInitFailures);
}
// ---------------- Core name related methods ---------------
private CoreDescriptor reloadCoreDescriptor(CoreDescriptor oldDesc) {
if (oldDesc == null) {
return null;
}
CorePropertiesLocator cpl = new CorePropertiesLocator(null);
CoreDescriptor ret = cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
// Ok, this little jewel is all because we still create core descriptors on the fly from lists of properties
// in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in the new world
// of core discovery without writing the core.properties file out first.
//
// TODO: remove core.properties from the conf directory in test files, it's in a bad place there anyway.
if (ret == null) {
oldDesc.loadExtraProperties(); // there may be changes to extra properties that we need to pick up.
return oldDesc;
}
// The CloudDescriptor bit here is created in a very convoluted way, requiring access to private methods
// in ZkController. When reloading, this behavior is identical to what used to happen where a copy of the old
// CoreDescriptor was just re-used.
if (ret.getCloudDescriptor() != null) {
ret.getCloudDescriptor().reload(oldDesc.getCloudDescriptor());
}
return ret;
}
/**
   * Reloads a core.
   * See {@link CoreContainer#reload(String, UUID)} for details.
*/
public void reload(String name) {
reload(name, null);
}
/**
* Recreates a SolrCore.
* While the new core is loading, requests will continue to be dispatched to
* and processed by the old core
*
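   * <p>Sketch (the core name is illustrative):</p>
   * <pre>{@code
   * coreContainer.reload("techproducts");         // unconditional reload
   * coreContainer.reload("techproducts", coreId); // no-op if the loaded core's id differs
   * }</pre>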
* @param name the name of the SolrCore to reload
   * @param coreId The unique id of the core ({@link SolrCore#uniqueId}). If null, the core is reloaded
   *               unconditionally; if the currently loaded core has a different id, this call is a no-op
*/
public void reload(String name, UUID coreId) {
if (isShutDown) {
throw new AlreadyClosedException();
}
SolrCore newCore = null;
SolrCore core = solrCores.getCoreFromAnyList(name, false, coreId);
if (core != null) {
// The underlying core properties files may have changed, we don't really know. So we have a (perhaps) stale
// CoreDescriptor and we need to reload it from the disk files
CoreDescriptor cd = reloadCoreDescriptor(core.getCoreDescriptor());
solrCores.addCoreDescriptor(cd);
Closeable oldCore = null;
boolean success = false;
try {
solrCores.waitAddPendingCoreOps(cd.getName());
ConfigSet coreConfig = coreConfigService.loadConfigSet(cd);
if (log.isInfoEnabled()) {
log.info("Reloading SolrCore '{}' using configuration from {}", cd.getName(), coreConfig.getName());
}
newCore = core.reload(coreConfig);
DocCollection docCollection = null;
if (getZkController() != null) {
docCollection = getZkController().getClusterState().getCollection(cd.getCollectionName());
// turn off indexing now, before the new core is registered
if (docCollection.getBool(ZkStateReader.READ_ONLY, false)) {
newCore.readOnly = true;
}
}
registerCore(cd, newCore, false, false);
// force commit on old core if the new one is readOnly and prevent any new updates
if (newCore.readOnly) {
RefCounted<IndexWriter> iwRef = core.getSolrCoreState().getIndexWriter(null);
if (iwRef != null) {
IndexWriter iw = iwRef.get();
// switch old core to readOnly
core.readOnly = true;
try {
if (iw != null) {
iw.commit();
}
} finally {
iwRef.decref();
}
}
}
if (docCollection != null) {
Replica replica = docCollection.getReplica(cd.getCloudDescriptor().getCoreNodeName());
assert replica != null;
if (replica.getType() == Replica.Type.TLOG) { // TODO: needed here?
getZkController().stopReplicationFromLeader(core.getName());
if (!cd.getCloudDescriptor().isLeader()) {
getZkController().startReplicationFromLeader(newCore.getName(), true);
}
} else if (replica.getType() == Replica.Type.PULL) {
getZkController().stopReplicationFromLeader(core.getName());
getZkController().startReplicationFromLeader(newCore.getName(), false);
}
}
success = true;
} catch (SolrCoreState.CoreIsClosedException e) {
throw e;
} catch (Exception e) {
coreInitFailures.put(cd.getName(), new CoreLoadFailure(cd, e));
throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to reload core [" + cd.getName() + "]", e);
} finally {
if (!success && newCore != null && newCore.getOpenCount() > 0) {
IOUtils.closeQuietly(newCore);
}
solrCores.removeFromPendingOps(cd.getName());
}
} else {
      if (coreId != null) return; // this core was already reloaded/unloaded, return right away
CoreLoadFailure clf = coreInitFailures.get(name);
if (clf != null) {
try {
solrCores.waitAddPendingCoreOps(clf.cd.getName());
createFromDescriptor(clf.cd, true, false);
} finally {
solrCores.removeFromPendingOps(clf.cd.getName());
}
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + name);
}
}
}
/**
* Swaps two SolrCore descriptors.
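   * <p>Standalone mode only; a sketch (the names are illustrative):</p>
   * <pre>{@code
   * coreContainer.swap("live", "staging");
   * }</pre>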
*/
public void swap(String n0, String n1) {
apiAssumeStandalone();
if (n0 == null || n1 == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can not swap unnamed cores.");
}
solrCores.swap(n0, n1);
coresLocator.swap(this, solrCores.getCoreDescriptor(n0), solrCores.getCoreDescriptor(n1));
log.info("swapped: {} with {}", n0, n1);
}
/**
* Unload a core from this container, leaving all files on disk
*
* @param name the name of the core to unload
*/
public void unload(String name) {
unload(name, false, false, false);
}
/**
* Unload a core from this container, optionally removing the core's data and configuration
*
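   * <p>Sketch (removes the core and all of its files; the name is illustrative):</p>
   * <pre>{@code
   * coreContainer.unload("mycore", true, true, true);
   * }</pre>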
* @param name the name of the core to unload
* @param deleteIndexDir if true, delete the core's index on close
* @param deleteDataDir if true, delete the core's data directory on close
* @param deleteInstanceDir if true, delete the core's instance directory on close
*/
public void unload(String name, boolean deleteIndexDir, boolean deleteDataDir, boolean deleteInstanceDir) {
CoreDescriptor cd = solrCores.getCoreDescriptor(name);
if (name != null) {
// check for core-init errors first
CoreLoadFailure loadFailure = coreInitFailures.remove(name);
if (loadFailure != null) {
// getting the index directory requires opening a DirectoryFactory with a SolrConfig, etc,
// which we may not be able to do because of the init error. So we just go with what we
// can glean from the CoreDescriptor - datadir and instancedir
SolrCore.deleteUnloadedCore(loadFailure.cd, deleteDataDir, deleteInstanceDir);
// If last time around we didn't successfully load, make sure that all traces of the coreDescriptor are gone.
if (cd != null) {
solrCores.removeCoreDescriptor(cd);
coresLocator.delete(this, cd);
}
return;
}
}
if (cd == null) {
log.warn("Cannot unload non-existent core '{}'", name);
throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
}
boolean close = solrCores.isLoadedNotPendingClose(name);
SolrCore core = solrCores.remove(name);
solrCores.removeCoreDescriptor(cd);
coresLocator.delete(this, cd);
if (core == null) {
// transient core
SolrCore.deleteUnloadedCore(cd, deleteDataDir, deleteInstanceDir);
return;
}
// delete metrics specific to this core
metricManager.removeRegistry(core.getCoreMetricManager().getRegistryName());
if (zkSys.getZkController() != null) {
// cancel recovery in cloud mode
core.getSolrCoreState().cancelRecovery();
if (cd.getCloudDescriptor().getReplicaType() == Replica.Type.PULL
|| cd.getCloudDescriptor().getReplicaType() == Replica.Type.TLOG) {
// Stop replication if this is part of a pull/tlog replica before closing the core
zkSys.getZkController().stopReplicationFromLeader(name);
}
}
core.unloadOnClose(cd, deleteIndexDir, deleteDataDir, deleteInstanceDir);
if (close)
core.closeAndWait();
if (zkSys.getZkController() != null) {
try {
zkSys.getZkController().unregister(name, cd);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted while unregistering core [" + name + "] from cloud state");
} catch (KeeperException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
}
}
}
public void rename(String name, String toName) {
apiAssumeStandalone();
SolrIdentifierValidator.validateCoreName(toName);
try (SolrCore core = getCore(name)) {
if (core != null) {
String oldRegistryName = core.getCoreMetricManager().getRegistryName();
String newRegistryName = SolrCoreMetricManager.createRegistryName(core, toName);
metricManager.swapRegistries(oldRegistryName, newRegistryName);
// The old coreDescriptor is obsolete, so remove it. registerCore will put it back.
CoreDescriptor cd = core.getCoreDescriptor();
solrCores.removeCoreDescriptor(cd);
cd.setProperty("name", toName);
solrCores.addCoreDescriptor(cd);
core.setName(toName);
registerCore(cd, core, true, false);
SolrCore old = solrCores.remove(name);
coresLocator.rename(this, old.getCoreDescriptor(), core.getCoreDescriptor());
}
}
}
private void apiAssumeStandalone() {
if (getZkController() != null) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Not supported in SolrCloud");
}
}
/**
* Get the CoreDescriptors for all cores managed by this container
*
* @return a List of CoreDescriptors
*/
public List<CoreDescriptor> getCoreDescriptors() {
return solrCores.getCoreDescriptors();
}
public CoreDescriptor getCoreDescriptor(String coreName) {
return solrCores.getCoreDescriptor(coreName);
}
/** Where cores are created (absolute). */
public Path getCoreRootDirectory() {
return cfg.getCoreRootDirectory();
}
public SolrCore getCore(String name) {
return getCore(name, null);
}
/**
   * Gets a core by name and increases its refcount.
*
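   * <p>Callers must decrement the refcount when done; try-with-resources does this (sketch):</p>
   * <pre>{@code
   * try (SolrCore core = coreContainer.getCore("mycore")) {
   *   if (core != null) {
   *     // use the core
   *   }
   * }
   * }</pre>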
   * @param name the core name
   * @param id if non-null, only return the core when its {@link SolrCore#uniqueId} matches
* @return the core if found, null if a SolrCore by this name does not exist
* @throws SolrCoreInitializationException if a SolrCore with this name failed to be initialized
* @see SolrCore#close()
*/
public SolrCore getCore(String name, UUID id) {
// Do this in two phases since we don't want to lock access to the cores over a load.
SolrCore core = solrCores.getCoreFromAnyList(name, true, id);
// If a core is loaded, we're done just return it.
if (core != null) {
return core;
}
// If it's not yet loaded, we can check if it's had a core init failure and "do the right thing"
CoreDescriptor desc = solrCores.getCoreDescriptor(name);
// if there was an error initializing this core, throw a 500
// error with the details for clients attempting to access it.
CoreLoadFailure loadFailure = getCoreInitFailures().get(name);
if (null != loadFailure) {
throw new SolrCoreInitializationException(name, loadFailure.exception);
}
// This is a bit of awkwardness where SolrCloud and transient cores don't play nice together. For transient cores,
// we have to allow them to be created at any time there hasn't been a core load failure (use reload to cure that).
// But for TestConfigSetsAPI.testUploadWithScriptUpdateProcessor, this needs to _not_ try to load the core if
// the core is null and there was an error. If you change this, be sure to run both TestConfigSetsAPI and
// TestLazyCores
if (desc == null || zkSys.getZkController() != null) return null;
// This will put an entry in pending core ops if the core isn't loaded. Here's where moving the
// waitAddPendingCoreOps to createFromDescriptor would introduce a race condition.
core = solrCores.waitAddPendingCoreOps(name);
if (isShutDown) return null; // We're quitting, so stop. This needs to be after the wait above since we may come off
// the wait as a consequence of shutting down.
try {
if (core == null) {
if (zkSys.getZkController() != null) {
zkSys.getZkController().throwErrorIfReplicaReplaced(desc);
}
core = createFromDescriptor(desc, true, false); // This should throw an error if it fails.
}
core.open();
} finally {
solrCores.removeFromPendingOps(name);
}
return core;
}
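// Typical call pattern (illustrative sketch; "techproducts" is a stand-in
// core name and cc a CoreContainer): the returned core carries the refcount
// taken here and must be released via close():
//   try (SolrCore core = cc.getCore("techproducts")) {
//     if (core != null) { /* use the core */ }
//   }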
public BlobRepository getBlobRepository() {
return blobRepository;
}
/**
* If using asyncSolrCoreLoad=true, calling this after {@link #load()} will
* not return until all cores have finished loading.
*
* @param timeoutMs timeout, upon which method simply returns
*/
public void waitForLoadingCoresToFinish(long timeoutMs) {
solrCores.waitForLoadingCoresToFinish(timeoutMs);
}
public void waitForLoadingCore(String name, long timeoutMs) {
solrCores.waitForLoadingCoreToFinish(name, timeoutMs);
}
// ---------------- CoreContainer request handlers --------------
@SuppressWarnings({"rawtypes"})
protected <T> T createHandler(String path, String handlerClass, Class<T> clazz) {
T handler = loader.newInstance(handlerClass, clazz, null, new Class[]{CoreContainer.class}, new Object[]{this});
if (handler instanceof SolrRequestHandler) {
containerHandlers.put(path, (SolrRequestHandler) handler);
}
if (handler instanceof SolrMetricProducer) {
((SolrMetricProducer) handler).initializeMetrics(solrMetricsContext, path);
}
return handler;
}
public CoreAdminHandler getMultiCoreHandler() {
return coreAdminHandler;
}
public CollectionsHandler getCollectionsHandler() {
return collectionsHandler;
}
public HealthCheckHandler getHealthCheckHandler() {
return healthCheckHandler;
}
public InfoHandler getInfoHandler() {
return infoHandler;
}
public ConfigSetsHandler getConfigSetsHandler() {
return configSetsHandler;
}
public String getHostName() {
return this.hostName;
}
/**
* Gets the alternate path for multicore handling:
* This is used in case there is a registered unnamed core (aka name is "") to
* declare an alternate way of accessing named cores.
* This can also be used in a pseudo single-core environment so admins can prepare
* a new version before swapping.
*/
public String getManagementPath() {
return cfg.getManagementPath();
}
@SuppressWarnings({"rawtypes"})
public LogWatcher getLogging() {
return logging;
}
/**
* Determines whether the core is already loaded or not but does NOT load the core
*/
public boolean isLoaded(String name) {
return solrCores.isLoaded(name);
}
public boolean isLoadedNotPendingClose(String name) {
return solrCores.isLoadedNotPendingClose(name);
}
// Primarily for transient cores when a core is aged out.
public void queueCoreToClose(SolrCore coreToClose) {
solrCores.queueCoreToClose(coreToClose);
}
/**
* Gets a Solr core descriptor for a core that is not loaded. Note that if the caller calls this on a
* loaded core, the unloaded descriptor will be returned.
*
* @param cname - name of the unloaded core descriptor to load
* @return a coreDescriptor. May return null
*/
public CoreDescriptor getUnloadedCoreDescriptor(String cname) {
return solrCores.getUnloadedCoreDescriptor(cname);
}
/** The primary path of a Solr server's config, cores, and misc things. Absolute. */
//TODO return Path
public String getSolrHome() {
return solrHome.toString();
}
/**
* A path where Solr users can retrieve arbitrary files from. Absolute.
* <p>
* This directory is generally created by each node on startup. Files located in this directory can then be
* manipulated using select Solr features (e.g. streaming expressions).
*/
public Path getUserFilesPath() {
return solrHome.resolve("userfiles");
}
public boolean isZooKeeperAware() {
return zkSys.getZkController() != null;
}
public ZkController getZkController() {
return zkSys.getZkController();
}
public NodeConfig getConfig() {
return cfg;
}
/**
* The default ShardHandlerFactory used to communicate with other solr instances
*/
public ShardHandlerFactory getShardHandlerFactory() {
return shardHandlerFactory;
}
public UpdateShardHandler getUpdateShardHandler() {
return updateShardHandler;
}
public SolrResourceLoader getResourceLoader() {
return loader;
}
public boolean isCoreLoading(String name) {
return solrCores.isCoreLoading(name);
}
public AuthorizationPlugin getAuthorizationPlugin() {
return authorizationPlugin == null ? null : authorizationPlugin.plugin;
}
public AuthenticationPlugin getAuthenticationPlugin() {
return authenticationPlugin == null ? null : authenticationPlugin.plugin;
}
public AuditLoggerPlugin getAuditLoggerPlugin() {
return auditloggerPlugin == null ? null : auditloggerPlugin.plugin;
}
public NodeConfig getNodeConfig() {
return cfg;
}
public long getStatus() {
return status;
}
// Occasionally we need to access the transient cache handler in places other than coreContainer.
public TransientSolrCoreCache getTransientCache() {
return solrCores.getTransientCacheHandler();
}
/**
* @param solrCore the core against which we check if there has been a tragic exception
* @return whether this Solr core has tragic exception
* @see org.apache.lucene.index.IndexWriter#getTragicException()
*/
public boolean checkTragicException(SolrCore solrCore) {
Throwable tragicException;
try {
tragicException = solrCore.getSolrCoreState().getTragicException();
} catch (IOException e) {
// failed to open an indexWriter
tragicException = e;
}
if (tragicException != null && isZooKeeperAware()) {
getZkController().giveupLeadership(solrCore.getCoreDescriptor());
try {
// If the error was something like a full file system disconnect, this probably won't help
// But if it is a transient disk failure then it's worth a try
solrCore.getSolrCoreState().newIndexWriter(solrCore, false); // should we rollback?
} catch (IOException e) {
log.warn("Could not roll index writer after tragedy");
}
}
return tragicException != null;
}
public ContainerPluginsRegistry getContainerPluginsRegistry() {
return containerPluginsRegistry;
}
public ClusterSingletons getClusterSingletons() {
return clusterSingletons;
}
public ClusterEventProducer getClusterEventProducer() {
return clusterEventProducer;
}
public PlacementPluginFactory<? extends PlacementPluginConfig> getPlacementPluginFactory() {
return placementPluginFactory;
}
static {
ExecutorUtil.addThreadLocalProvider(SolrRequestInfo.getInheritableThreadLocalProvider());
}
/**
* Run an arbitrary task in its own thread. This is an expert option and is
* a method you should use with great care. It would be bad to run something that never stopped
* or run something that took a very long time. Typically this is intended for actions that take
* a few seconds, and therefore would be bad to wait for within a request, or actions that need to happen
* when a core has zero references, but would not pose a significant hindrance to server shutdown times.
* It is not intended for long-running tasks, and if you are using a Runnable with a loop in it, you are
* almost certainly doing it wrong.
* <p><br>
* WARNING: Solr will not be able to shut down gracefully until this task completes!
* <p><br>
* A significant upside of using this method vs creating your own ExecutorService is that your code
* does not have to properly shut down executors, which typically is risky from a unit testing
* perspective since the test framework will complain if you don't carefully ensure the executor
* shuts down before the end of the test. Also, the threads running this task are sure to have
* a proper MDC for logging.
* <p><br>
* Normally, one uses {@link SolrCore#runAsync(Runnable)} if possible, but in some cases
* you might need to execute a task asynchronously when you could be running on a node with no
* cores, and then use of this method is indicated.
*
* @param r the task to run
*/
public void runAsync(Runnable r) {
coreContainerAsyncTaskExecutor.submit(r);
}
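// Illustrative use (assumed caller code; the lambda body is hypothetical):
//   coreContainer.runAsync(() -> tempDirCleanup.run());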
}
class CloserThread extends Thread {
CoreContainer container;
SolrCores solrCores;
NodeConfig cfg;
CloserThread(CoreContainer container, SolrCores solrCores, NodeConfig cfg) {
super("CloserThread");
this.container = container;
this.solrCores = solrCores;
this.cfg = cfg;
}
// It's important that this be the _only_ thread removing things from pendingDynamicCloses!
// This is single-threaded, but I tried a multi-threaded approach and didn't see any performance gains, so
// there's no good justification for the complexity. I suspect that the locking on things like DefaultSolrCoreState
// essentially create a single-threaded process anyway.
@Override
public void run() {
while (!container.isShutDown()) {
synchronized (solrCores.getModifyLock()) { // need this so we can wait and be awoken.
try {
solrCores.getModifyLock().wait();
} catch (InterruptedException e) {
// Well, if we've been told to stop, we will. Otherwise, continue on and check to see if there are
// any cores to close.
}
}
for (SolrCore removeMe = solrCores.getCoreToClose();
removeMe != null && !container.isShutDown();
removeMe = solrCores.getCoreToClose()) {
try {
removeMe.close();
} finally {
solrCores.removeFromPendingOps(removeMe.getName());
}
}
}
}
}
| 1 | 40,240 | Lets call this getConfigSetService and maybe rename the field now (or later) | apache-lucene-solr | java |
@@ -27,8 +27,8 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/klog"
- "k8s.io/klog/klogr"
+ "k8s.io/klog/v2"
+ "k8s.io/klog/v2/klogr"
"github.com/jetstack/cert-manager/pkg/api"
) | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logs
import (
"context"
"flag"
"log"
"time"
"github.com/go-logr/logr"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
"k8s.io/klog/klogr"
"github.com/jetstack/cert-manager/pkg/api"
)
var (
Log = klogr.New().WithName("cert-manager")
ErrorLevel = 0
WarnLevel = 1
InfoLevel = 2
DebugLevel = 3
)
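// These levels are plain ints consumed through logr's V(), e.g.
//
//	Log.V(DebugLevel).Info("some verbose message")
//
// A typed variant (a sketch only, not what this file does today) could look
// like the following; callers would then pass int(level) to V():
//
//	type Level int
//
//	const (
//		ErrorLevel Level = iota
//		WarnLevel
//		InfoLevel
//		DebugLevel
//	)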
var logFlushFreq = flag.Duration("log-flush-frequency", 5*time.Second, "Maximum number of seconds between log flushes")
// GlogWriter serves as a bridge between the standard log package and the klog package.
type GlogWriter struct{}
// Write implements the io.Writer interface.
func (writer GlogWriter) Write(data []byte) (n int, err error) {
klog.Info(string(data))
return len(data), nil
}
// InitLogs initializes logs the way we want for kubernetes.
func InitLogs(fs *flag.FlagSet) {
if fs == nil {
fs = flag.CommandLine
}
klog.InitFlags(fs)
fs.Set("logtostderr", "true")
log.SetOutput(GlogWriter{})
log.SetFlags(0)
// The default glog flush interval is 30 seconds, which is frighteningly long.
go wait.Until(klog.Flush, *logFlushFreq, wait.NeverStop)
}
// FlushLogs flushes logs immediately.
func FlushLogs() {
klog.Flush()
}
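// Wiring sketch (assumed caller code):
//
//	func main() {
//		InitLogs(nil) // nil falls back to flag.CommandLine
//		defer FlushLogs()
//		// ... run the program ...
//	}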
const (
ResourceNameKey = "resource_name"
ResourceNamespaceKey = "resource_namespace"
ResourceKindKey = "resource_kind"
RelatedResourceNameKey = "related_resource_name"
RelatedResourceNamespaceKey = "related_resource_namespace"
RelatedResourceKindKey = "related_resource_kind"
)
func WithResource(l logr.Logger, obj metav1.Object) logr.Logger {
var gvk schema.GroupVersionKind
if runtimeObj, ok := obj.(runtime.Object); ok {
gvks, _, _ := api.Scheme.ObjectKinds(runtimeObj)
if len(gvks) > 0 {
gvk = gvks[0]
}
}
// TODO: add resource apiVersion
return l.WithValues(
ResourceNameKey, obj.GetName(),
ResourceNamespaceKey, obj.GetNamespace(),
ResourceKindKey, gvk.Kind,
)
}
func WithRelatedResource(l logr.Logger, obj metav1.Object) logr.Logger {
var gvk schema.GroupVersionKind
if runtimeObj, ok := obj.(runtime.Object); ok {
gvks, _, _ := api.Scheme.ObjectKinds(runtimeObj)
if len(gvks) > 0 {
gvk = gvks[0]
}
}
// TODO: add resource apiVersion
return l.WithValues(
RelatedResourceNameKey, obj.GetName(),
RelatedResourceNamespaceKey, obj.GetNamespace(),
RelatedResourceKindKey, gvk.Kind,
)
}
func WithRelatedResourceName(l logr.Logger, name, namespace, kind string) logr.Logger {
return l.WithValues(
RelatedResourceNameKey, name,
RelatedResourceNamespaceKey, namespace,
RelatedResourceKindKey, kind,
)
}
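// Usage sketch (assumed caller code; cert and secretName are placeholders):
//
//	log := WithResource(Log, cert)
//	log = WithRelatedResourceName(log, secretName, cert.GetNamespace(), "Secret")
//	log.Info("ensuring secret")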
var contextKey = &struct{}{}
func FromContext(ctx context.Context, names ...string) logr.Logger {
l := ctx.Value(contextKey)
if l == nil {
return Log
}
lT := l.(logr.Logger)
for _, n := range names {
lT = lT.WithName(n)
}
return lT
}
func NewContext(ctx context.Context, l logr.Logger, names ...string) context.Context {
if ctx == nil {
ctx = context.Background()
}
if l == nil {
l = FromContext(ctx)
}
for _, n := range names {
l = l.WithName(n)
}
return context.WithValue(ctx, contextKey, l)
}
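// Context plumbing sketch (assumed caller code): attach a named logger to a
// context in one layer and recover it in another:
//
//	ctx := NewContext(context.Background(), Log, "controllers")
//	// ... further down the call stack ...
//	log := FromContext(ctx, "orders")
//	log.V(InfoLevel).Info("syncing")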
| 1 | 22,894 | Could / should we make these constants and give them a type? | jetstack-cert-manager | go |
@@ -63,6 +63,13 @@ const (
deleteAfterAnnotation = "hive.openshift.io/delete-after" // contains a duration after which the cluster should be cleaned up.
tryInstallOnceAnnotation = "hive.openshift.io/try-install-once"
+
+ platformAWS = "aws"
+ platformAzure = "azure"
+ platformGCP = "gcp"
+ platformBaremetal = "baremetal"
+ platformUnknown = "unknown platform"
+ regionUnknown = "unknown region"
)
var ( | 1 | package clusterdeployment
import (
"context"
"fmt"
"os"
"reflect"
"sort"
"strings"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
routev1 "github.com/openshift/api/route/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8slabels "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
apihelpers "github.com/openshift/hive/pkg/apis/helpers"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
"github.com/openshift/hive/pkg/constants"
"github.com/openshift/hive/pkg/controller/images"
hivemetrics "github.com/openshift/hive/pkg/controller/metrics"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
"github.com/openshift/hive/pkg/imageset"
"github.com/openshift/hive/pkg/install"
"github.com/openshift/hive/pkg/remoteclient"
)
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = hivev1.SchemeGroupVersion.WithKind("ClusterDeployment")
const (
controllerName = "clusterDeployment"
defaultRequeueTime = 10 * time.Second
maxProvisions = 3
rawAdminKubeconfigKey = "raw-kubeconfig"
clusterImageSetNotFoundReason = "ClusterImageSetNotFound"
clusterImageSetFoundReason = "ClusterImageSetFound"
dnsNotReadyReason = "DNSNotReady"
dnsReadyReason = "DNSReady"
dnsReadyAnnotation = "hive.openshift.io/dnsready"
deleteAfterAnnotation = "hive.openshift.io/delete-after" // contains a duration after which the cluster should be cleaned up.
tryInstallOnceAnnotation = "hive.openshift.io/try-install-once"
)
var (
metricCompletedInstallJobRestarts = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_completed_install_restart",
Help: "Distribution of the number of restarts for all completed cluster installations.",
Buckets: []float64{0, 2, 10, 20, 50},
},
[]string{"cluster_type"},
)
metricInstallJobDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_install_job_duration_seconds",
Help: "Distribution of the runtime of completed install jobs.",
Buckets: []float64{60, 300, 600, 1200, 1800, 2400, 3000, 3600},
},
)
metricInstallDelaySeconds = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_install_job_delay_seconds",
Help: "Time between cluster deployment creation and creation of the job to install/provision the cluster.",
Buckets: []float64{30, 60, 120, 300, 600, 1200, 1800},
},
)
metricImageSetDelaySeconds = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_imageset_job_delay_seconds",
Help: "Time between cluster deployment creation and creation of the job which resolves the installer image to use for a ClusterImageSet.",
Buckets: []float64{10, 30, 60, 300, 600, 1200, 1800},
},
)
metricClustersCreated = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_cluster_deployments_created_total",
Help: "Counter incremented every time we observe a new cluster.",
},
[]string{"cluster_type"},
)
metricClustersInstalled = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_cluster_deployments_installed_total",
Help: "Counter incremented every time we observe a successful installation.",
},
[]string{"cluster_type"},
)
metricClustersDeleted = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_cluster_deployments_deleted_total",
Help: "Counter incremented every time we observe a deleted cluster.",
},
[]string{"cluster_type"},
)
metricDNSDelaySeconds = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_dns_delay_seconds",
Help: "Time between cluster deployment with spec.manageDNS creation and the DNSZone becoming ready.",
Buckets: []float64{10, 30, 60, 300, 600, 1200, 1800},
},
)
)
func init() {
metrics.Registry.MustRegister(metricInstallJobDuration)
metrics.Registry.MustRegister(metricCompletedInstallJobRestarts)
metrics.Registry.MustRegister(metricInstallDelaySeconds)
metrics.Registry.MustRegister(metricImageSetDelaySeconds)
metrics.Registry.MustRegister(metricClustersCreated)
metrics.Registry.MustRegister(metricClustersInstalled)
metrics.Registry.MustRegister(metricClustersDeleted)
metrics.Registry.MustRegister(metricDNSDelaySeconds)
}
// Add creates a new ClusterDeployment controller and adds it to the manager with default RBAC.
func Add(mgr manager.Manager) error {
return AddToManager(mgr, NewReconciler(mgr))
}
// NewReconciler returns a new reconcile.Reconciler
func NewReconciler(mgr manager.Manager) reconcile.Reconciler {
logger := log.WithField("controller", controllerName)
r := &ReconcileClusterDeployment{
Client: controllerutils.NewClientWithMetricsOrDie(mgr, controllerName),
scheme: mgr.GetScheme(),
logger: logger,
expectations: controllerutils.NewExpectations(logger),
}
r.remoteClusterAPIClientBuilder = func(cd *hivev1.ClusterDeployment) remoteclient.Builder {
return remoteclient.NewBuilder(r.Client, cd, controllerName)
}
return r
}
// AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
func AddToManager(mgr manager.Manager, r reconcile.Reconciler) error {
cdReconciler, ok := r.(*ReconcileClusterDeployment)
if !ok {
return errors.New("reconciler supplied is not a ReconcileClusterDeployment")
}
c, err := controller.New("clusterdeployment-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: controllerutils.GetConcurrentReconciles()})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error getting new cluster deployment")
return err
}
// Watch for changes to ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestForObject{})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment")
return err
}
// Watch for provisions
if err := cdReconciler.watchClusterProvisions(c); err != nil {
return err
}
// Watch for jobs created by a ClusterDeployment:
err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment job")
return err
}
// Watch for pods created by an install job
err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestsFromMapFunc{
ToRequests: handler.ToRequestsFunc(selectorPodWatchHandler),
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment pods")
return err
}
// Watch for deprovision requests created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeprovision{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching deprovision request created by cluster deployment")
return err
}
// Watch for dnszones created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.DNSZone{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment dnszones")
return err
}
// Watch for changes to SyncSetInstance
err = c.Watch(&source.Kind{Type: &hivev1.SyncSetInstance{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
return fmt.Errorf("cannot start watch on syncset instance: %v", err)
}
return nil
}
var _ reconcile.Reconciler = &ReconcileClusterDeployment{}
// ReconcileClusterDeployment reconciles a ClusterDeployment object
type ReconcileClusterDeployment struct {
client.Client
scheme *runtime.Scheme
logger log.FieldLogger
// A TTLCache of clusterprovision creates each clusterdeployment expects to see
expectations controllerutils.ExpectationsInterface
// remoteClusterAPIClientBuilder is a function pointer to the function that gets a builder for building a client
// for the remote cluster's API server
remoteClusterAPIClientBuilder func(cd *hivev1.ClusterDeployment) remoteclient.Builder
}
// Reconcile reads that state of the cluster for a ClusterDeployment object and makes changes based on the state read
// and what is in the ClusterDeployment.Spec
//
// Automatically generate RBAC rules to allow the Controller to read and write Deployments
//
func (r *ReconcileClusterDeployment) Reconcile(request reconcile.Request) (result reconcile.Result, returnErr error) {
start := time.Now()
cdLog := r.logger.WithFields(log.Fields{
"controller": controllerName,
"clusterDeployment": request.Name,
"namespace": request.Namespace,
})
// For logging, we need to see when the reconciliation loop starts and ends.
cdLog.Info("reconciling cluster deployment")
defer func() {
dur := time.Since(start)
hivemetrics.MetricControllerReconcileTime.WithLabelValues(controllerName).Observe(dur.Seconds())
cdLog.WithField("elapsed", dur).WithField("result", result).Info("reconcile complete")
}()
// Fetch the ClusterDeployment instance
cd := &hivev1.ClusterDeployment{}
err := r.Get(context.TODO(), request.NamespacedName, cd)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
cdLog.Info("cluster deployment Not Found")
r.expectations.DeleteExpectations(request.NamespacedName.String())
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
cdLog.WithError(err).Error("Error getting cluster deployment")
return reconcile.Result{}, err
}
return r.reconcile(request, cd, cdLog)
}
func (r *ReconcileClusterDeployment) addAdditionalKubeconfigCAs(cd *hivev1.ClusterDeployment,
cdLog log.FieldLogger) error {
adminKubeconfigSecret := &corev1.Secret{}
if err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name}, adminKubeconfigSecret); err != nil {
cdLog.WithError(err).Error("failed to get admin kubeconfig secret")
return err
}
originalSecret := adminKubeconfigSecret.DeepCopy()
rawData, hasRawData := adminKubeconfigSecret.Data[rawAdminKubeconfigKey]
if !hasRawData {
adminKubeconfigSecret.Data[rawAdminKubeconfigKey] = adminKubeconfigSecret.Data[constants.KubeconfigSecretKey]
rawData = adminKubeconfigSecret.Data[constants.KubeconfigSecretKey]
}
var err error
adminKubeconfigSecret.Data[constants.KubeconfigSecretKey], err = controllerutils.AddAdditionalKubeconfigCAs(rawData)
if err != nil {
cdLog.WithError(err).Errorf("error adding additional CAs to admin kubeconfig")
return err
}
if reflect.DeepEqual(originalSecret.Data, adminKubeconfigSecret.Data) {
cdLog.Debug("secret data has not changed, no need to update")
return nil
}
cdLog.Info("admin kubeconfig has been modified, updating")
err = r.Update(context.TODO(), adminKubeconfigSecret)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating admin kubeconfig secret")
return err
}
return nil
}
func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (result reconcile.Result, returnErr error) {
// Set platform label on the ClusterDeployment
if platform := getClusterPlatform(cd); cd.Labels[hivev1.HiveClusterPlatformLabel] != platform {
if cd.Labels == nil {
cd.Labels = make(map[string]string)
}
if cd.Labels[hivev1.HiveClusterPlatformLabel] != "" {
cdLog.Warnf("changing the value of %s from %s to %s", hivev1.HiveClusterPlatformLabel,
cd.Labels[hivev1.HiveClusterPlatformLabel], platform)
}
cd.Labels[hivev1.HiveClusterPlatformLabel] = platform
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to set cluster platform label")
}
return reconcile.Result{}, err
}
if cd.DeletionTimestamp != nil {
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
clearUnderwaySecondsMetrics(cd)
return reconcile.Result{}, nil
}
// Deprovision still underway, report metric for this cluster.
hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(
time.Since(cd.DeletionTimestamp.Time).Seconds())
// If the cluster never made it to installed, make sure we clear the provisioning
// underway metric.
if !cd.Spec.Installed {
hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(0.0)
}
return r.syncDeletedClusterDeployment(cd, cdLog)
}
// Check for the delete-after annotation, and if the cluster has expired, delete it
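// For example, a ClusterDeployment annotated with
//   hive.openshift.io/delete-after: "8h"
// is deleted roughly eight hours after its creationTimestamp; the value may
// be anything accepted by time.ParseDuration.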
deleteAfter, ok := cd.Annotations[deleteAfterAnnotation]
if ok {
cdLog.Debugf("found delete after annotation: %s", deleteAfter)
dur, err := time.ParseDuration(deleteAfter)
if err != nil {
return reconcile.Result{}, fmt.Errorf("error parsing %s as a duration: %v", deleteAfterAnnotation, err)
}
if !cd.CreationTimestamp.IsZero() {
expiry := cd.CreationTimestamp.Add(dur)
cdLog.Debugf("cluster expires at: %s", expiry)
if time.Now().After(expiry) {
cdLog.WithField("expiry", expiry).Info("cluster has expired, issuing delete")
err := r.Delete(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting expired cluster")
}
return reconcile.Result{}, err
}
defer func() {
requeueNow := result.Requeue && result.RequeueAfter <= 0
if returnErr == nil && !requeueNow {
// We have an expiry time but we're not expired yet. Set requeueAfter for just after expiry time
// so that we requeue cluster for deletion once reconcile has completed
requeueAfter := time.Until(expiry) + 60*time.Second
if requeueAfter < result.RequeueAfter || result.RequeueAfter <= 0 {
cdLog.Debugf("cluster will re-sync due to expiry time in: %v", requeueAfter)
result.RequeueAfter = requeueAfter
}
}
}()
}
}
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
cdLog.Debugf("adding clusterdeployment finalizer")
if err := r.addClusterDeploymentFinalizer(cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding finalizer")
return reconcile.Result{}, err
}
metricClustersCreated.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return reconcile.Result{}, nil
}
if cd.Spec.Installed {
// update SyncSetFailedCondition status condition
cdLog.Info("Check if any syncsetinstance Failed")
updateCD, err := r.setSyncSetFailedCondition(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("Error updating SyncSetFailedCondition status condition")
return reconcile.Result{}, err
} else if updateCD {
return reconcile.Result{}, nil
}
cdLog.Debug("cluster is already installed, no processing of provision needed")
r.cleanupInstallLogPVC(cd, cdLog)
if cd.Spec.ClusterMetadata != nil &&
cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name != "" {
if err := r.addAdditionalKubeconfigCAs(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
if cd.Status.WebConsoleURL == "" || cd.Status.APIURL == "" {
if err := r.setClusterStatusURLs(cd, cdLog); err != nil {
cdLog.WithError(err).Error("failed to set admin kubeconfig status")
return reconcile.Result{}, err
}
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not set installed status")
return reconcile.Result{}, err
}
}
}
return reconcile.Result{}, nil
}
// Indicate that the cluster is still installing:
hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(
time.Since(cd.CreationTimestamp.Time).Seconds())
imageSet, err := r.getClusterImageSet(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
releaseImage := r.getReleaseImage(cd, imageSet, cdLog)
cdLog.Debug("loading pull secrets")
pullSecret, err := r.mergePullSecrets(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("Error merging pull secrets")
return reconcile.Result{}, err
}
// Update the pull secret object if required
switch updated, err := r.updatePullSecretInfo(pullSecret, cd, cdLog); {
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "Error updating the merged pull secret")
return reconcile.Result{}, err
case updated:
// The controller will not automatically requeue the cluster deployment
// since the controller is not watching for secrets. So, requeue manually.
return reconcile.Result{Requeue: true}, nil
}
switch result, err := r.resolveInstallerImage(cd, imageSet, releaseImage, cdLog); {
case err != nil:
return reconcile.Result{}, err
case result != nil:
return *result, nil
}
if !r.expectations.SatisfiedExpectations(request.String()) {
cdLog.Debug("waiting for expectations to be satisfied")
return reconcile.Result{}, nil
}
if cd.Status.ProvisionRef == nil {
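// The hive.openshift.io/try-install-once annotation, when set to "true",
// suppresses any retry after the first failed provision attempt.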
if cd.Status.InstallRestarts > 0 && cd.Annotations[tryInstallOnceAnnotation] == "true" {
cdLog.Debug("not creating new provision since the deployment is set to try install only once")
return reconcile.Result{}, nil
}
return r.startNewProvision(cd, releaseImage, cdLog)
}
return r.reconcileExistingProvision(cd, cdLog)
}
func (r *ReconcileClusterDeployment) startNewProvision(
cd *hivev1.ClusterDeployment,
releaseImage string,
cdLog log.FieldLogger,
) (result reconcile.Result, returnedErr error) {
existingProvisions, err := r.existingProvisions(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
for _, provision := range existingProvisions {
if provision.Spec.Stage != hivev1.ClusterProvisionStageFailed {
return reconcile.Result{}, r.adoptProvision(cd, provision, cdLog)
}
}
r.deleteStaleProvisions(existingProvisions, cdLog)
if cd.Spec.ManageDNS {
dnsZone, err := r.ensureManagedDNSZone(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
if dnsZone == nil {
return reconcile.Result{}, nil
}
updated, err := r.setDNSDelayMetric(cd, dnsZone, cdLog)
if updated || err != nil {
return reconcile.Result{}, err
}
}
if err := controllerutils.SetupClusterInstallServiceAccount(r, cd.Namespace, cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error setting up service account and role")
return reconcile.Result{}, err
}
provisionName := apihelpers.GetResourceName(cd.Name, fmt.Sprintf("%d-%s", cd.Status.InstallRestarts, utilrand.String(5)))
labels := cd.Labels
if labels == nil {
labels = map[string]string{}
}
labels[constants.ClusterDeploymentNameLabel] = cd.Name
skipGatherLogs := os.Getenv(constants.SkipGatherLogsEnvVar) == "true"
if !skipGatherLogs {
if err := r.createPVC(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
}
podSpec, err := install.InstallerPodSpec(
cd,
provisionName,
releaseImage,
controllerutils.ServiceAccountName,
GetInstallLogsPVCName(cd),
skipGatherLogs,
)
if err != nil {
cdLog.WithError(err).Error("could not generate installer pod spec")
return reconcile.Result{}, err
}
provision := &hivev1.ClusterProvision{
ObjectMeta: metav1.ObjectMeta{
Name: provisionName,
Namespace: cd.Namespace,
Labels: labels,
},
Spec: hivev1.ClusterProvisionSpec{
ClusterDeploymentRef: corev1.LocalObjectReference{
Name: cd.Name,
},
PodSpec: *podSpec,
Attempt: cd.Status.InstallRestarts,
Stage: hivev1.ClusterProvisionStageInitializing,
},
}
// Copy over the cluster ID and infra ID from previous provision so that a failed install can be removed.
if cd.Spec.ClusterMetadata != nil {
provision.Spec.PrevClusterID = &cd.Spec.ClusterMetadata.ClusterID
provision.Spec.PrevInfraID = &cd.Spec.ClusterMetadata.InfraID
}
cdLog.WithField("derivedObject", provision.Name).Debug("Setting label on derived object")
provision.Labels = k8slabels.AddLabel(provision.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
if err := controllerutil.SetControllerReference(cd, provision, r.scheme); err != nil {
cdLog.WithError(err).Error("could not set the owner ref on provision")
return reconcile.Result{}, err
}
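// Record the expected creation before issuing it; reconciliation is gated on
// SatisfiedExpectations above until the new ClusterProvision is observed,
// which avoids racing the informer cache and double-creating provisions.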
r.expectations.ExpectCreations(types.NamespacedName{Namespace: cd.Namespace, Name: cd.Name}.String(), 1)
if err := r.Create(context.TODO(), provision); err != nil {
cdLog.WithError(err).Error("could not create provision")
r.expectations.CreationObserved(types.NamespacedName{Namespace: cd.Namespace, Name: cd.Name}.String())
return reconcile.Result{}, err
}
cdLog.WithField("provision", provision.Name).Info("created new provision")
if cd.Status.InstallRestarts == 0 {
kickstartDuration := time.Since(cd.CreationTimestamp.Time)
cdLog.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to first provision seconds")
metricInstallDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
}
return reconcile.Result{}, nil
}
func (r *ReconcileClusterDeployment) reconcileExistingProvision(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (result reconcile.Result, returnedErr error) {
cdLog = cdLog.WithField("provision", cd.Status.ProvisionRef.Name)
cdLog.Debug("reconciling existing provision")
provision := &hivev1.ClusterProvision{}
switch err := r.Get(context.TODO(), types.NamespacedName{Name: cd.Status.ProvisionRef.Name, Namespace: cd.Namespace}, provision); {
case apierrors.IsNotFound(err):
cdLog.Warn("linked provision not found")
return r.clearOutCurrentProvision(cd, cdLog)
case err != nil:
cdLog.WithError(err).Error("could not get provision")
return reconcile.Result{}, err
}
// Save the cluster ID and infra ID from the provision so that we can
// clean up partial installs on the next provision attempt in case of failure.
if provision.Spec.InfraID != nil {
clusterMetadata := &hivev1.ClusterMetadata{}
clusterMetadata.InfraID = *provision.Spec.InfraID
if provision.Spec.ClusterID != nil {
clusterMetadata.ClusterID = *provision.Spec.ClusterID
}
if provision.Spec.AdminKubeconfigSecretRef != nil {
clusterMetadata.AdminKubeconfigSecretRef = *provision.Spec.AdminKubeconfigSecretRef
}
if provision.Spec.AdminPasswordSecretRef != nil {
clusterMetadata.AdminPasswordSecretRef = *provision.Spec.AdminPasswordSecretRef
}
if !reflect.DeepEqual(clusterMetadata, cd.Spec.ClusterMetadata) {
cd.Spec.ClusterMetadata = clusterMetadata
cdLog.Infof("Saving infra ID %q for cluster", cd.Spec.ClusterMetadata.InfraID)
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating clusterdeployment status with infra ID")
}
return reconcile.Result{}, err
}
}
switch provision.Spec.Stage {
case hivev1.ClusterProvisionStageInitializing:
cdLog.Debug("still initializing provision")
return reconcile.Result{}, nil
case hivev1.ClusterProvisionStageProvisioning:
cdLog.Debug("still provisioning")
return reconcile.Result{}, nil
case hivev1.ClusterProvisionStageFailed:
return r.reconcileFailedProvision(cd, provision, cdLog)
case hivev1.ClusterProvisionStageComplete:
return r.reconcileCompletedProvision(cd, provision, cdLog)
default:
cdLog.WithField("stage", provision.Spec.Stage).Error("unknown provision stage")
return reconcile.Result{}, errors.New("unknown provision stage")
}
}
func (r *ReconcileClusterDeployment) reconcileFailedProvision(cd *hivev1.ClusterDeployment, provision *hivev1.ClusterProvision, cdLog log.FieldLogger) (reconcile.Result, error) {
nextProvisionTime := time.Now()
reason := "MissingCondition"
failedCond := controllerutils.FindClusterProvisionCondition(provision.Status.Conditions, hivev1.ClusterProvisionFailedCondition)
if failedCond != nil && failedCond.Status == corev1.ConditionTrue {
nextProvisionTime = calculateNextProvisionTime(failedCond.LastTransitionTime.Time, cd.Status.InstallRestarts, cdLog)
reason = failedCond.Reason
} else {
cdLog.Warnf("failed provision does not have a %s condition", hivev1.ClusterProvisionFailedCondition)
}
newConditions, condChange := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.ProvisionFailedCondition,
corev1.ConditionTrue,
reason,
fmt.Sprintf("Provision %s failed. Next provision at %s.", provision.Name, nextProvisionTime.UTC().Format(time.RFC3339)),
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
cd.Status.Conditions = newConditions
timeUntilNextProvision := time.Until(nextProvisionTime)
if timeUntilNextProvision.Seconds() > 0 {
cdLog.WithField("nextProvision", nextProvisionTime).Info("waiting to start a new provision after failure")
if condChange {
if err := r.statusUpdate(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{RequeueAfter: timeUntilNextProvision}, nil
}
cdLog.Info("clearing current failed provision to make way for a new provision")
return r.clearOutCurrentProvision(cd, cdLog)
}
func (r *ReconcileClusterDeployment) reconcileCompletedProvision(cd *hivev1.ClusterDeployment, provision *hivev1.ClusterProvision, cdLog log.FieldLogger) (reconcile.Result, error) {
cdLog.Info("provision completed successfully")
statusChange := false
if cd.Status.InstalledTimestamp == nil {
statusChange = true
now := metav1.Now()
cd.Status.InstalledTimestamp = &now
}
conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.ProvisionFailedCondition,
corev1.ConditionFalse,
"ProvisionSucceeded",
fmt.Sprintf("Provision %s succeeded.", provision.Name),
controllerutils.UpdateConditionNever,
)
if changed {
statusChange = true
cd.Status.Conditions = conds
}
if cd.Spec.ClusterMetadata != nil &&
cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name != "" {
if err := r.addAdditionalKubeconfigCAs(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
if cd.Status.WebConsoleURL == "" || cd.Status.APIURL == "" {
statusChange = true
if err := r.setClusterStatusURLs(cd, cdLog); err != nil {
cdLog.WithError(err).Error("failed to set cluster status URLs")
return reconcile.Result{}, err
}
}
}
if statusChange {
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to update cluster deployment status")
return reconcile.Result{}, err
}
}
if cd.Spec.Installed {
return reconcile.Result{}, nil
}
cd.Spec.Installed = true
if err := r.Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to set the Installed flag")
return reconcile.Result{}, err
}
// jobDuration calculates the time elapsed since the first clusterprovision was created
startTime := cd.CreationTimestamp
if firstProvision := r.getFirstProvision(cd, cdLog); firstProvision != nil {
startTime = firstProvision.CreationTimestamp
}
jobDuration := time.Since(startTime.Time)
cdLog.WithField("duration", jobDuration.Seconds()).Debug("install job completed")
metricInstallJobDuration.Observe(float64(jobDuration.Seconds()))
// Report a metric for the total number of install restarts:
metricCompletedInstallJobRestarts.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).
Observe(float64(cd.Status.InstallRestarts))
// Clear the install underway seconds metric. After this no-one should be reporting
// this metric for this cluster.
hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(0.0)
metricClustersInstalled.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return reconcile.Result{}, nil
}
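// clearOutCurrentProvision drops the provision reference and bumps
// InstallRestarts so that the next reconcile can start a fresh
// ClusterProvision whose name encodes the updated restart count.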
func (r *ReconcileClusterDeployment) clearOutCurrentProvision(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
cd.Status.ProvisionRef = nil
cd.Status.InstallRestarts = cd.Status.InstallRestarts + 1
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not clear out current provision")
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// GetInstallLogsPVCName returns the expected name of the persistent volume claim for cluster install failure logs.
func GetInstallLogsPVCName(cd *hivev1.ClusterDeployment) string {
return apihelpers.GetResourceName(cd.Name, "install-logs")
}
// createPVC will create the PVC for the install logs if it does not already exist.
func (r *ReconcileClusterDeployment) createPVC(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
pvcName := GetInstallLogsPVCName(cd)
switch err := r.Get(context.TODO(), types.NamespacedName{Name: pvcName, Namespace: cd.Namespace}, &corev1.PersistentVolumeClaim{}); {
case err == nil:
cdLog.Debug("pvc already exists")
return nil
case !apierrors.IsNotFound(err):
cdLog.WithError(err).Error("error getting persistent volume claim")
return err
}
labels := map[string]string{
constants.InstallJobLabel: "true",
constants.ClusterDeploymentNameLabel: cd.Name,
}
if cd.Labels != nil {
typeStr, ok := cd.Labels[hivev1.HiveClusterTypeLabel]
if ok {
labels[hivev1.HiveClusterTypeLabel] = typeStr
}
}
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
Namespace: cd.Namespace,
Labels: labels,
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}
cdLog.WithField("pvc", pvc.Name).Info("creating persistent volume claim")
cdLog.WithField("derivedObject", pvc.Name).Debug("Setting labels on derived object")
pvc.Labels = k8slabels.AddLabel(pvc.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
pvc.Labels = k8slabels.AddLabel(pvc.Labels, constants.PVCTypeLabel, constants.PVCTypeInstallLogs)
if err := controllerutil.SetControllerReference(cd, pvc, r.scheme); err != nil {
cdLog.WithError(err).Error("error setting controller reference on pvc")
return err
}
err := r.Create(context.TODO(), pvc)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating pvc")
}
return err
}
// getReleaseImage looks for a release image in the clusterdeployment or its corresponding imageset in the following order:
// 1 - specified in the cluster deployment spec.provisioning.releaseImage
// 2 - referenced in the cluster deployment spec.provisioning.imageSetRef
func (r *ReconcileClusterDeployment) getReleaseImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, cdLog log.FieldLogger) string {
if cd.Spec.Provisioning.ReleaseImage != "" {
return cd.Spec.Provisioning.ReleaseImage
}
if imageSet != nil {
return imageSet.Spec.ReleaseImage
}
return ""
}
func (r *ReconcileClusterDeployment) getClusterImageSet(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*hivev1.ClusterImageSet, error) {
if cd.Spec.Provisioning.ImageSetRef == nil || len(cd.Spec.Provisioning.ImageSetRef.Name) == 0 {
return nil, nil
}
imageSet := &hivev1.ClusterImageSet{}
if err := r.Get(context.TODO(), types.NamespacedName{Name: cd.Spec.Provisioning.ImageSetRef.Name}, imageSet); err != nil {
if apierrors.IsNotFound(err) {
cdLog.WithField("clusterimageset", cd.Spec.Provisioning.ImageSetRef.Name).Warning("clusterdeployment references non-existent clusterimageset")
if err := r.setImageSetNotFoundCondition(cd, true, cdLog); err != nil {
return nil, err
}
} else {
cdLog.WithError(err).WithField("clusterimageset", cd.Spec.Provisioning.ImageSetRef.Name).Error("unexpected error retrieving clusterimageset")
}
return nil, err
}
return imageSet, nil
}
func (r *ReconcileClusterDeployment) statusUpdate(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update clusterdeployment status")
}
return err
}
func (r *ReconcileClusterDeployment) resolveInstallerImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, releaseImage string, cdLog log.FieldLogger) (*reconcile.Result, error) {
areImagesResolved := cd.Status.InstallerImage != nil && cd.Status.CLIImage != nil
jobKey := client.ObjectKey{Namespace: cd.Namespace, Name: imageset.GetImageSetJobName(cd.Name)}
jobLog := cdLog.WithField("job", jobKey.Name)
existingJob := &batchv1.Job{}
switch err := r.Get(context.Background(), jobKey, existingJob); {
// The job does not exist. If the images have been resolved, continue reconciling. Otherwise, create the job.
case apierrors.IsNotFound(err):
if areImagesResolved {
return nil, nil
}
// If the .status.clusterVersionStatus.availableUpdates field is nil,
// do a status update to set it to an empty list. All status updates
// done by controllers set this automatically. However, the imageset
// job does not. If the field is still nil when the imageset job tries
// to update the status, then the update will fail validation.
if cd.Status.ClusterVersionStatus.AvailableUpdates == nil {
return &reconcile.Result{}, r.statusUpdate(cd, cdLog)
}
cliImage := images.GetCLIImage()
job := imageset.GenerateImageSetJob(cd, releaseImage, controllerutils.ServiceAccountName, imageset.AlwaysPullImage(cliImage))
cdLog.WithField("derivedObject", job.Name).Debug("Setting labels on derived object")
job.Labels = k8slabels.AddLabel(job.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
job.Labels = k8slabels.AddLabel(job.Labels, constants.JobTypeLabel, constants.JobTypeImageSet)
if err := controllerutil.SetControllerReference(cd, job, r.scheme); err != nil {
cdLog.WithError(err).Error("error setting controller reference on job")
return nil, err
}
jobLog.WithField("releaseImage", releaseImage).Info("creating imageset job")
err = controllerutils.SetupClusterInstallServiceAccount(r, cd.Namespace, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error setting up service account and role")
return nil, err
}
if err := r.Create(context.TODO(), job); err != nil {
jobLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating job")
return nil, err
}
// kickstartDuration calculates the delay between creation of cd and start of imageset job
kickstartDuration := time.Since(cd.CreationTimestamp.Time)
cdLog.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to imageset job seconds")
metricImageSetDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
return &reconcile.Result{}, nil
// There was an error getting the job. Return the error.
case err != nil:
jobLog.WithError(err).Error("cannot get job")
return nil, err
// The job exists and is in the process of getting deleted. If the images were resolved, then continue reconciling.
// If the images were not resolved, requeue and wait for the delete to complete.
case !existingJob.DeletionTimestamp.IsZero():
if areImagesResolved {
return nil, nil
}
jobLog.Debug("imageset job is being deleted. Will recreate once deleted")
return &reconcile.Result{RequeueAfter: defaultRequeueTime}, err
// If job exists and is finished, delete it. If the images were not resolved, then the job will be re-created.
case controllerutils.IsFinished(existingJob):
jobLog.WithField("successful", controllerutils.IsSuccessful(existingJob)).
Warning("Finished job found. Deleting.")
if err := r.Delete(
context.Background(),
existingJob,
client.PropagationPolicy(metav1.DeletePropagationForeground),
); err != nil {
jobLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot delete imageset job")
return nil, err
}
if areImagesResolved {
return nil, nil
}
return &reconcile.Result{}, nil
// The job exists and is in progress. Wait for the job to finish before doing any more reconciliation.
default:
jobLog.Debug("job exists and is in progress")
return &reconcile.Result{}, nil
}
}
func (r *ReconcileClusterDeployment) setDNSNotReadyCondition(cd *hivev1.ClusterDeployment, isReady bool, message string, cdLog log.FieldLogger) error {
status := corev1.ConditionFalse
reason := dnsReadyReason
if !isReady {
status = corev1.ConditionTrue
reason = dnsNotReadyReason
}
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.DNSNotReadyCondition,
status,
reason,
message,
controllerutils.UpdateConditionNever)
if !changed {
return nil
}
cd.Status.Conditions = conditions
cdLog.Debugf("setting DNSNotReadyCondition to %v", status)
return r.Status().Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) setImageSetNotFoundCondition(cd *hivev1.ClusterDeployment, isNotFound bool, cdLog log.FieldLogger) error {
status := corev1.ConditionFalse
reason := clusterImageSetFoundReason
message := fmt.Sprintf("ClusterImageSet %s is available", cd.Spec.Provisioning.ImageSetRef.Name)
if isNotFound {
status = corev1.ConditionTrue
reason = clusterImageSetNotFoundReason
message = fmt.Sprintf("ClusterImageSet %s is not available", cd.Spec.Provisioning.ImageSetRef.Name)
}
conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.ClusterImageSetNotFoundCondition,
status,
reason,
message,
controllerutils.UpdateConditionNever)
if !changed {
return nil
}
cdLog.Infof("setting ClusterImageSetNotFoundCondition to %v", status)
cd.Status.Conditions = conds
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update status conditions")
}
return err
}
// setClusterStatusURLs fetches the openshift console route from the remote cluster and uses it to determine
// the correct APIURL and WebConsoleURL, and then set them in the Status. Typically only called if these Status fields
// are unset.
func (r *ReconcileClusterDeployment) setClusterStatusURLs(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
remoteClientBuilder := r.remoteClusterAPIClientBuilder(cd)
server, err := remoteClientBuilder.APIURL()
if err != nil {
return err
}
cdLog.Debugf("found cluster API URL in kubeconfig: %s", server)
cd.Status.APIURL = server
remoteClient, err := remoteClientBuilder.Build()
if err != nil {
return err
}
routeObject := &routev1.Route{}
if err := remoteClient.Get(
context.Background(),
client.ObjectKey{Namespace: "openshift-console", Name: "console"},
routeObject,
); err != nil {
cdLog.WithError(err).Error("error fetching remote route object")
return err
}
cdLog.Debugf("read remote route object: %s", routeObject)
cd.Status.WebConsoleURL = "https://" + routeObject.Spec.Host
return nil
}
// ensureManagedDNSZoneDeleted is a safety check to ensure that the child managed DNSZone
// linked to the parent cluster deployment gets a deletionTimestamp when the parent is deleted.
// Normally we expect Kube garbage collection to do this for us, but in rare cases we've seen it
// not working as intended.
func (r *ReconcileClusterDeployment) ensureManagedDNSZoneDeleted(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*reconcile.Result, error) {
if !cd.Spec.ManageDNS {
return nil, nil
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: controllerutils.DNSZoneName(cd.Name)}
err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone)
if err != nil && !apierrors.IsNotFound(err) {
cdLog.WithError(err).Error("error looking up managed dnszone")
return &reconcile.Result{}, err
}
if apierrors.IsNotFound(err) || !dnsZone.DeletionTimestamp.IsZero() {
cdLog.Debug("dnszone has been deleted or is getting deleted")
return nil, nil
}
err = r.Delete(context.TODO(), dnsZone,
client.PropagationPolicy(metav1.DeletePropagationForeground))
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting managed dnszone")
}
return &reconcile.Result{}, err
}
func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
result, err := r.ensureManagedDNSZoneDeleted(cd, cdLog)
if result != nil {
return *result, err
}
if err != nil {
return reconcile.Result{}, err
}
// Wait for outstanding provision to be removed before creating deprovision request
if cd.Status.ProvisionRef != nil {
provision := &hivev1.ClusterProvision{}
switch err := r.Get(context.TODO(), types.NamespacedName{Name: cd.Status.ProvisionRef.Name, Namespace: cd.Namespace}, provision); {
case apierrors.IsNotFound(err):
cdLog.Debug("linked provision removed")
case err != nil:
cdLog.WithError(err).Error("could not get provision")
return reconcile.Result{}, err
case provision.DeletionTimestamp == nil:
if err := r.Delete(context.TODO(), provision); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not delete provision")
return reconcile.Result{}, err
}
cdLog.Info("deleted outstanding provision")
return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
default:
cdLog.Debug("still waiting for outstanding provision to be removed")
return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
}
}
// Skips creation of deprovision request if PreserveOnDelete is true and cluster is installed
if cd.Spec.PreserveOnDelete {
if cd.Spec.Installed {
cdLog.Warn("skipping creation of deprovisioning request for installed cluster due to PreserveOnDelete=true")
if controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
err = r.removeClusterDeploymentFinalizer(cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// Overriding PreserveOnDelete because we might have deleted the cluster deployment before it finished
// installing, which can cause cloud resources to leak
cdLog.Infof("PreserveOnDelete=true but creating deprovisioning request as cluster was never successfully provisioned")
}
if cd.Spec.ClusterMetadata == nil {
cdLog.Warn("skipping uninstall for cluster that never had clusterID set")
err = r.removeClusterDeploymentFinalizer(cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
}
// We do not yet support deprovision for BareMetal, for now skip deprovision and remove finalizer.
if cd.Spec.Platform.BareMetal != nil {
cdLog.Info("skipping deprovision for BareMetal cluster, removing finalizer")
err := r.removeClusterDeploymentFinalizer(cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
}
// Generate a deprovision request
request, err := generateDeprovision(cd)
if err != nil {
cdLog.WithError(err).Error("error generating deprovision request")
return reconcile.Result{}, err
}
cdLog.WithField("derivedObject", request.Name).Debug("Setting label on derived object")
request.Labels = k8slabels.AddLabel(request.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
err = controllerutil.SetControllerReference(cd, request, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on deprovision request: %v", err)
return reconcile.Result{}, err
}
// Check if deprovision request already exists:
existingRequest := &hivev1.ClusterDeprovision{}
switch err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Name, Namespace: cd.Namespace}, existingRequest); {
case apierrors.IsNotFound(err):
cdLog.Info("creating deprovision request for cluster deployment")
switch err = r.Create(context.TODO(), request); {
case apierrors.IsAlreadyExists(err):
cdLog.Info("deprovision request already exists")
// requeue the clusterdeployment immediately to process the status of the deprovision request
return reconcile.Result{Requeue: true}, nil
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating deprovision request")
// Check if the namespace is terminated; if so we can give up, remove the finalizer, and let
// the cluster go away.
ns := &corev1.Namespace{}
err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Namespace}, ns)
if err != nil {
cdLog.WithError(err).Error("error checking for deletionTimestamp on namespace")
return reconcile.Result{}, err
}
if ns.DeletionTimestamp != nil {
cdLog.Warn("detected a namespace deleted before deprovision request could be created, giving up on deprovision and removing finalizer")
err = r.removeClusterDeploymentFinalizer(cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
}
return reconcile.Result{}, err
default:
return reconcile.Result{}, nil
}
case err != nil:
cdLog.WithError(err).Error("error getting deprovision request")
return reconcile.Result{}, err
}
// Deprovision request exists, check whether it has completed
if existingRequest.Status.Completed {
cdLog.Infof("deprovision request completed, removing finalizer")
err = r.removeClusterDeploymentFinalizer(cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
}
cdLog.Debug("deprovision request not yet completed")
return reconcile.Result{}, nil
}
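// addClusterDeploymentFinalizer adds the deprovision finalizer to the cluster deployment.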
func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error {
cd = cd.DeepCopy()
controllerutils.AddFinalizer(cd, hivev1.FinalizerDeprovision)
return r.Update(context.TODO(), cd)
}
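// removeClusterDeploymentFinalizer removes the deprovision finalizer from the cluster deployment,
// clears the underway metrics, and increments the clusters-deleted counter.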
func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error {
cd = cd.DeepCopy()
controllerutils.DeleteFinalizer(cd, hivev1.FinalizerDeprovision)
if err := r.Update(context.TODO(), cd); err != nil {
return err
}
clearUnderwaySecondsMetrics(cd)
// Increment the clusters deleted counter:
metricClustersDeleted.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return nil
}
// setDNSDelayMetric will calculate the amount of time elapsed from clusterdeployment creation
// to when the dnszone became ready, and set a metric to report the delay.
// Will return a bool indicating whether the clusterdeployment has been modified, and whether any error was encountered.
func (r *ReconcileClusterDeployment) setDNSDelayMetric(cd *hivev1.ClusterDeployment, dnsZone *hivev1.DNSZone, cdLog log.FieldLogger) (bool, error) {
modified := false
initializeAnnotations(cd)
if _, ok := cd.Annotations[dnsReadyAnnotation]; ok {
// already have recorded the dnsdelay metric
return modified, nil
}
readyTimestamp := dnsReadyTransitionTime(dnsZone)
if readyTimestamp == nil {
msg := "did not find timestamp for when dnszone became ready"
cdLog.WithField("dnszone", dnsZone.Name).Error(msg)
return modified, errors.New(msg)
}
dnsDelayDuration := readyTimestamp.Sub(cd.CreationTimestamp.Time)
cdLog.WithField("duration", dnsDelayDuration.Seconds()).Info("DNS ready")
cd.Annotations[dnsReadyAnnotation] = dnsDelayDuration.String()
if err := r.Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to save annotation marking DNS becoming ready")
return modified, err
}
modified = true
metricDNSDelaySeconds.Observe(float64(dnsDelayDuration.Seconds()))
return modified, nil
}
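// ensureManagedDNSZone ensures the child DNSZone for the cluster deployment exists and returns it once
// it is available. A nil DNSZone with a nil error indicates the zone is not yet available.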
func (r *ReconcileClusterDeployment) ensureManagedDNSZone(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*hivev1.DNSZone, error) {
if cd.Spec.Platform.AWS == nil && cd.Spec.Platform.GCP == nil {
cdLog.Error("cluster deployment platform does not support managed DNS")
if err := r.setDNSNotReadyCondition(cd, false, "Managed DNS is not supported for platform", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return nil, errors.New("managed DNS not supported on platform")
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: controllerutils.DNSZoneName(cd.Name)}
logger := cdLog.WithField("zone", dnsZoneNamespacedName.String())
switch err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone); {
case apierrors.IsNotFound(err):
logger.Info("creating new DNSZone for cluster deployment")
return nil, r.createManagedDNSZone(cd, logger)
case err != nil:
logger.WithError(err).Error("failed to fetch DNS zone")
return nil, err
}
if !metav1.IsControlledBy(dnsZone, cd) {
cdLog.Error("DNS zone already exists but is not owned by cluster deployment")
if err := r.setDNSNotReadyCondition(cd, false, "Existing DNS zone not owned by cluster deployment", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return nil, errors.New("Existing unowned DNS zone")
}
availableCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
if availableCondition == nil || availableCondition.Status != corev1.ConditionTrue {
// The clusterdeployment will be queued when the owned DNSZone's status
// is updated to available.
cdLog.Debug("DNSZone is not yet available. Waiting for zone to become available.")
if err := r.setDNSNotReadyCondition(cd, false, "DNS Zone not yet available", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return nil, nil
}
if err := r.setDNSNotReadyCondition(cd, true, "DNS Zone available", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return dnsZone, nil
}
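// createManagedDNSZone creates the child DNSZone for the cluster deployment, copying in the
// platform-specific credentials and setting labels and the controller reference.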
func (r *ReconcileClusterDeployment) createManagedDNSZone(cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
dnsZone := &hivev1.DNSZone{
ObjectMeta: metav1.ObjectMeta{
Name: controllerutils.DNSZoneName(cd.Name),
Namespace: cd.Namespace,
},
Spec: hivev1.DNSZoneSpec{
Zone: cd.Spec.BaseDomain,
LinkToParentDomain: true,
},
}
switch {
case cd.Spec.Platform.AWS != nil:
additionalTags := make([]hivev1.AWSResourceTag, 0, len(cd.Spec.Platform.AWS.UserTags))
for k, v := range cd.Spec.Platform.AWS.UserTags {
additionalTags = append(additionalTags, hivev1.AWSResourceTag{Key: k, Value: v})
}
region := ""
if strings.HasPrefix(cd.Spec.Platform.AWS.Region, constants.AWSChinaRegionPrefix) {
region = constants.AWSChinaRoute53Region
}
dnsZone.Spec.AWS = &hivev1.AWSDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.AWS.CredentialsSecretRef,
AdditionalTags: additionalTags,
Region: region,
}
case cd.Spec.Platform.GCP != nil:
dnsZone.Spec.GCP = &hivev1.GCPDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.GCP.CredentialsSecretRef,
}
}
logger.WithField("derivedObject", dnsZone.Name).Debug("Setting labels on derived object")
dnsZone.Labels = k8slabels.AddLabel(dnsZone.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
dnsZone.Labels = k8slabels.AddLabel(dnsZone.Labels, constants.DNSZoneTypeLabel, constants.DNSZoneTypeChild)
if err := controllerutil.SetControllerReference(cd, dnsZone, r.scheme); err != nil {
logger.WithError(err).Error("error setting controller reference on dnszone")
return err
}
err := r.Create(context.TODO(), dnsZone)
if err != nil {
logger.WithError(err).Log(controllerutils.LogLevel(err), "cannot create DNS zone")
return err
}
logger.Info("dns zone created")
return nil
}
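// selectorPodWatchHandler maps a watched pod to a reconcile request for the owning cluster deployment,
// based on the cluster deployment name label on the pod.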
func selectorPodWatchHandler(a handler.MapObject) []reconcile.Request {
retval := []reconcile.Request{}
pod := a.Object.(*corev1.Pod)
if pod == nil {
// Wasn't a Pod, bail out. This should not happen.
log.Errorf("Error converting MapObject.Object to Pod. Value: %+v", a.Object)
return retval
}
if pod.Labels == nil {
return retval
}
cdName, ok := pod.Labels[constants.ClusterDeploymentNameLabel]
if !ok {
return retval
}
retval = append(retval, reconcile.Request{NamespacedName: types.NamespacedName{
Name: cdName,
Namespace: pod.Namespace,
}})
return retval
}
// cleanupInstallLogPVC will immediately delete the PVC (should it exist) if the cluster was installed successfully, without retries.
// If there were retries, it will delete the PVC if it has been more than 7 days since the job was completed.
func (r *ReconcileClusterDeployment) cleanupInstallLogPVC(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
if !cd.Spec.Installed {
return nil
}
pvc := &corev1.PersistentVolumeClaim{}
err := r.Get(context.TODO(), types.NamespacedName{Name: GetInstallLogsPVCName(cd), Namespace: cd.Namespace}, pvc)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
cdLog.WithError(err).Error("error looking up install logs PVC")
return err
}
pvcLog := cdLog.WithField("pvc", pvc.Name)
switch {
case cd.Status.InstallRestarts == 0:
pvcLog.Info("deleting logs PersistentVolumeClaim for installed cluster with no restarts")
case cd.Status.InstalledTimestamp == nil:
pvcLog.Warn("deleting logs PersistentVolumeClaim for cluster with errors but no installed timestamp")
// Otherwise, delete if more than 7 days have passed.
case time.Since(cd.Status.InstalledTimestamp.Time) > (7 * 24 * time.Hour):
pvcLog.Info("deleting logs PersistentVolumeClaim for cluster that was installed after restarts more than 7 days ago")
default:
cdLog.WithField("pvc", pvc.Name).Debug("preserving logs PersistentVolumeClaim for cluster with install restarts for 7 days")
return nil
}
if err := r.Delete(context.TODO(), pvc); err != nil {
pvcLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting install logs PVC")
return err
}
return nil
}
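// generateDeprovision builds a ClusterDeprovision for the cluster deployment, copying in the
// platform-specific region and credentials. It returns an error for unsupported platforms.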
func generateDeprovision(cd *hivev1.ClusterDeployment) (*hivev1.ClusterDeprovision, error) {
req := &hivev1.ClusterDeprovision{
ObjectMeta: metav1.ObjectMeta{
Name: cd.Name,
Namespace: cd.Namespace,
},
Spec: hivev1.ClusterDeprovisionSpec{
InfraID: cd.Spec.ClusterMetadata.InfraID,
ClusterID: cd.Spec.ClusterMetadata.ClusterID,
},
}
switch {
case cd.Spec.Platform.AWS != nil:
req.Spec.Platform.AWS = &hivev1.AWSClusterDeprovision{
Region: cd.Spec.Platform.AWS.Region,
CredentialsSecretRef: &cd.Spec.Platform.AWS.CredentialsSecretRef,
}
case cd.Spec.Platform.Azure != nil:
req.Spec.Platform.Azure = &hivev1.AzureClusterDeprovision{
CredentialsSecretRef: &cd.Spec.Platform.Azure.CredentialsSecretRef,
}
case cd.Spec.Platform.GCP != nil:
req.Spec.Platform.GCP = &hivev1.GCPClusterDeprovision{
Region: cd.Spec.Platform.GCP.Region,
CredentialsSecretRef: &cd.Spec.Platform.GCP.CredentialsSecretRef,
}
default:
return nil, errors.New("unsupported cloud provider for deprovision")
}
return req, nil
}
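// generatePullSecretObj builds a docker config JSON secret holding the given pull secret for the
// cluster deployment's namespace.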
func generatePullSecretObj(pullSecret string, pullSecretName string, cd *hivev1.ClusterDeployment) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: pullSecretName,
Namespace: cd.Namespace,
},
Type: corev1.SecretTypeDockerConfigJson,
StringData: map[string]string{
corev1.DockerConfigJsonKey: pullSecret,
},
}
}
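// dnsReadyTransitionTime returns the time at which the DNSZone's available condition last became
// true, or nil if the zone is not available.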
func dnsReadyTransitionTime(dnsZone *hivev1.DNSZone) *time.Time {
readyCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
if readyCondition != nil && readyCondition.Status == corev1.ConditionTrue {
return &readyCondition.LastTransitionTime.Time
}
return nil
}
func clearUnderwaySecondsMetrics(cd *hivev1.ClusterDeployment) {
// If we've successfully cleared the deprovision finalizer we know this is a good time to
// reset the underway metric to 0, after which it will no longer be reported.
hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(0.0)
// Clear the install underway seconds metric if this cluster was still installing.
if !cd.Spec.Installed {
hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(0.0)
}
}
// initializeAnnotations() initializes the annotations if it is not already
func initializeAnnotations(cd *hivev1.ClusterDeployment) {
if cd.Annotations == nil {
cd.Annotations = map[string]string{}
}
}
// mergePullSecrets merges the global pull secret JSON (if defined) with the cluster's pull secret JSON (if defined)
// An error will be returned if neither is defined
func (r *ReconcileClusterDeployment) mergePullSecrets(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (string, error) {
var localPullSecret string
var err error
// For readability, we refer to the pull secret in the cluster deployment spec as the local pull secret
if cd.Spec.PullSecretRef != nil {
localPullSecret, err = controllerutils.LoadSecretData(r.Client, cd.Spec.PullSecretRef.Name, cd.Namespace, corev1.DockerConfigJsonKey)
if err != nil {
if !apierrors.IsNotFound(err) {
return "", err
}
}
}
// Check for a global pull secret name in the environment, as it comes from the hive config
globalPullSecretName := os.Getenv(constants.GlobalPullSecret)
var globalPullSecret string
if len(globalPullSecretName) != 0 {
globalPullSecret, err = controllerutils.LoadSecretData(r.Client, globalPullSecretName, constants.HiveNamespace, corev1.DockerConfigJsonKey)
if err != nil {
return "", errors.Wrap(err, "global pull secret could not be retrieved")
}
}
switch {
case globalPullSecret != "" && localPullSecret != "":
// Merge the local pull secret and the global pull secret. If both contain the same registry name,
// the merged pull secret keeps the registry secret from the local pull secret.
pullSecret, err := controllerutils.MergeJsons(globalPullSecret, localPullSecret, cdLog)
if err != nil {
errMsg := "unable to merge global pull secret with local pull secret"
cdLog.WithError(err).Error(errMsg)
return "", errors.Wrap(err, errMsg)
}
return pullSecret, nil
case globalPullSecret != "":
return globalPullSecret, nil
case localPullSecret != "":
return localPullSecret, nil
default:
errMsg := "clusterdeployment must specify pull secret since hiveconfig does not specify a global pull secret"
cdLog.Error(errMsg)
return "", errors.New(errMsg)
}
}
// updatePullSecretInfo creates or updates the merged pull secret for the clusterdeployment.
// It returns true when the merged pull secret has been created or updated.
func (r *ReconcileClusterDeployment) updatePullSecretInfo(pullSecret string, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (bool, error) {
var err error
pullSecretObjExists := true
existingPullSecretObj := &corev1.Secret{}
mergedSecretName := constants.GetMergedPullSecretName(cd)
err = r.Get(context.TODO(), types.NamespacedName{Name: mergedSecretName, Namespace: cd.Namespace}, existingPullSecretObj)
if err != nil {
if apierrors.IsNotFound(err) {
cdLog.Info("Existing pull secret object not found")
pullSecretObjExists = false
} else {
return false, errors.Wrap(err, "Error getting pull secret from cluster deployment")
}
}
if pullSecretObjExists {
existingPullSecret, ok := existingPullSecretObj.Data[corev1.DockerConfigJsonKey]
if !ok {
return false, fmt.Errorf("Pull secret %s did not contain key %s", mergedSecretName, corev1.DockerConfigJsonKey)
}
if string(existingPullSecret) == pullSecret {
cdLog.Debug("Existing and the new merged pull secret are same")
return false, nil
}
cdLog.Info("Existing merged pull secret hash did not match with latest merged pull secret")
existingPullSecretObj.Data[corev1.DockerConfigJsonKey] = []byte(pullSecret)
err = r.Update(context.TODO(), existingPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error updating merged pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Updated the merged pull secret object successfully")
} else {
// create a new pull secret object
newPullSecretObj := generatePullSecretObj(
pullSecret,
mergedSecretName,
cd,
)
cdLog.WithField("derivedObject", newPullSecretObj.Name).Debug("Setting labels on derived object")
newPullSecretObj.Labels = k8slabels.AddLabel(newPullSecretObj.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
newPullSecretObj.Labels = k8slabels.AddLabel(newPullSecretObj.Labels, constants.SecretTypeLabel, constants.SecretTypeMergedPullSecret)
err = controllerutil.SetControllerReference(cd, newPullSecretObj, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on new merged pull secret: %v", err)
return false, err
}
err = r.Create(context.TODO(), newPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error creating new pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Created the merged pull secret object successfully")
}
return true, nil
}
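// calculateNextProvisionTime returns the earliest time at which a new provision attempt should start,
// backing off exponentially from the failure time: retries=0 -> +1m, retries=3 -> +8m, retries>=11 -> +24h.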
func calculateNextProvisionTime(failureTime time.Time, retries int, cdLog log.FieldLogger) time.Time {
// (2^currentRetries) * 60 seconds up to a max of 24 hours.
const sleepCap = 24 * time.Hour
const retryCap = 11 // log2(24*60)
if retries >= retryCap {
return failureTime.Add(sleepCap)
}
return failureTime.Add((1 << uint(retries)) * time.Minute)
}
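// existingProvisions lists all ClusterProvisions labeled as belonging to the cluster deployment.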
func (r *ReconcileClusterDeployment) existingProvisions(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) ([]*hivev1.ClusterProvision, error) {
provisionList := &hivev1.ClusterProvisionList{}
if err := r.List(
context.TODO(),
provisionList,
client.InNamespace(cd.Namespace),
client.MatchingLabels(map[string]string{constants.ClusterDeploymentNameLabel: cd.Name}),
); err != nil {
cdLog.WithError(err).Warn("could not list provisions for clusterdeployment")
return nil, err
}
provisions := make([]*hivev1.ClusterProvision, len(provisionList.Items))
for i := range provisionList.Items {
provisions[i] = &provisionList.Items[i]
}
return provisions, nil
}
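// getFirstProvision returns the provision for attempt 0, or nil if it cannot be found.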
func (r *ReconcileClusterDeployment) getFirstProvision(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) *hivev1.ClusterProvision {
provisions, err := r.existingProvisions(cd, cdLog)
if err != nil {
return nil
}
for _, provision := range provisions {
if provision.Spec.Attempt == 0 {
return provision
}
}
cdLog.Warn("could not find the first provision for clusterdeployment")
return nil
}
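// adoptProvision records the given provision as the cluster deployment's active provision by setting
// Status.ProvisionRef.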
func (r *ReconcileClusterDeployment) adoptProvision(cd *hivev1.ClusterDeployment, provision *hivev1.ClusterProvision, cdLog log.FieldLogger) error {
pLog := cdLog.WithField("provision", provision.Name)
cd.Status.ProvisionRef = &corev1.LocalObjectReference{Name: provision.Name}
if err := r.Status().Update(context.TODO(), cd); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "could not adopt provision")
return err
}
pLog.Info("adopted provision")
return nil
}
func (r *ReconcileClusterDeployment) deleteStaleProvisions(provs []*hivev1.ClusterProvision, cdLog log.FieldLogger) {
// Cap the number of existing provisions. Always keep the earliest provision as
// it is used to determine the total time that it took to install. Take off
// one extra to make room for the new provision being started.
amountToDelete := len(provs) - maxProvisions
if amountToDelete <= 0 {
return
}
cdLog.Infof("Deleting %d old provisions", amountToDelete)
sort.Slice(provs, func(i, j int) bool { return provs[i].Spec.Attempt < provs[j].Spec.Attempt })
for _, provision := range provs[1 : amountToDelete+1] {
pLog := cdLog.WithField("provision", provision.Name)
pLog.Info("Deleting old provision")
if err := r.Delete(context.TODO(), provision); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to delete old provision")
}
}
}
// getAllSyncSetInstances returns all syncset instances for a specific cluster deployment
func (r *ReconcileClusterDeployment) getAllSyncSetInstances(cd *hivev1.ClusterDeployment) ([]*hivev1.SyncSetInstance, error) {
list := &hivev1.SyncSetInstanceList{}
err := r.List(context.TODO(), list, client.InNamespace(cd.Namespace))
if err != nil {
return nil, err
}
syncSetInstances := []*hivev1.SyncSetInstance{}
for i, syncSetInstance := range list.Items {
if syncSetInstance.Spec.ClusterDeploymentRef.Name == cd.Name {
syncSetInstances = append(syncSetInstances, &list.Items[i])
}
}
return syncSetInstances, nil
}
// checkForFailedSyncSetInstance returns true if it finds a failed syncset instance
func checkForFailedSyncSetInstance(syncSetInstances []*hivev1.SyncSetInstance) bool {
for _, syncSetInstance := range syncSetInstances {
if checkSyncSetConditionsForFailure(syncSetInstance.Status.Conditions) {
return true
}
for _, r := range syncSetInstance.Status.Resources {
if checkSyncSetConditionsForFailure(r.Conditions) {
return true
}
}
for _, p := range syncSetInstance.Status.Patches {
if checkSyncSetConditionsForFailure(p.Conditions) {
return true
}
}
for _, s := range syncSetInstance.Status.Secrets {
if checkSyncSetConditionsForFailure(s.Conditions) {
return true
}
}
}
return false
}
// checkSyncSetConditionsForFailure returns true when any failure-type condition (apply failure,
// deletion failure, or unknown object) has a status of true
func checkSyncSetConditionsForFailure(conds []hivev1.SyncCondition) bool {
for _, c := range conds {
if c.Status != corev1.ConditionTrue {
continue
}
switch c.Type {
case hivev1.ApplyFailureSyncCondition, hivev1.DeletionFailedSyncCondition, hivev1.UnknownObjectSyncCondition:
return true
}
}
return false
}
// setSyncSetFailedCondition returns true when it sets or updates the hivev1.SyncSetFailedCondition
func (r *ReconcileClusterDeployment) setSyncSetFailedCondition(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (bool, error) {
// get all syncset instances for this cluster deployment
syncSetInstances, err := r.getAllSyncSetInstances(cd)
if err != nil {
cdLog.WithError(err).Error("Unable to list related syncset instances for cluster deployment")
return false, err
}
isFailedCondition := checkForFailedSyncSetInstance(syncSetInstances)
status := corev1.ConditionFalse
reason := "SyncSetApplySuccess"
message := "SyncSet apply is successful"
if isFailedCondition {
status = corev1.ConditionTrue
reason = "SyncSetApplyFailure"
message = "One of the SyncSetInstance apply has failed"
}
conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.SyncSetFailedCondition,
status,
reason,
message,
controllerutils.UpdateConditionNever,
)
if !changed {
return false, nil
}
cd.Status.Conditions = conds
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating syncset failed condition")
return false, err
}
return true, nil
}
// getClusterPlatform returns the platform of a given ClusterDeployment
func getClusterPlatform(cd *hivev1.ClusterDeployment) string {
switch {
case cd.Spec.Platform.AWS != nil:
return "aws"
case cd.Spec.Platform.Azure != nil:
return "azure"
case cd.Spec.Platform.GCP != nil:
return "gcp"
case cd.Spec.Platform.BareMetal != nil:
return "baremetal"
}
return "unknown"
}
| 1 | 10,972 | let's stick with lowercase for consistency. | openshift-hive | go |
@@ -17,10 +17,9 @@ limitations under the License.
package acme
import (
- corev1 "k8s.io/api/core/v1"
-
cmacme "github.com/jetstack/cert-manager/pkg/apis/acme/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
)
// IsFinalState will return true if the given ACME State is a 'final' state. | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package acme
import (
corev1 "k8s.io/api/core/v1"
cmacme "github.com/jetstack/cert-manager/pkg/apis/acme/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
)
// IsFinalState will return true if the given ACME State is a 'final' state.
// This is either 'valid' or one of the failure states ('invalid', 'expired' or 'errored').
// The 'valid' state is a special case, as it is a final state for Challenges but
// not for Orders.
func IsFinalState(s cmacme.State) bool {
switch s {
case cmacme.Valid:
return true
}
return IsFailureState(s)
}
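// IsFailureState will return true if the given ACME State is a failure state
// ('invalid', 'expired' or 'errored').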
func IsFailureState(s cmacme.State) bool {
switch s {
case cmacme.Invalid, cmacme.Expired, cmacme.Errored:
return true
}
return false
}
// PrivateKeySelector will default the SecretKeySelector with a default secret key
// if one is not already specified.
func PrivateKeySelector(sel cmmeta.SecretKeySelector) cmmeta.SecretKeySelector {
if len(sel.Key) == 0 {
sel.Key = corev1.TLSPrivateKeyKey
}
return sel
}
| 1 | 23,552 | ordering of imports went wrong | jetstack-cert-manager | go |
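The patch in this entry folds the corev1 import into the cert-manager import block. A minimal sketch of the grouping the review comment appears to ask for, assuming the project keeps k8s.io imports in their own block as the pre-patch file did:

import (
	corev1 "k8s.io/api/core/v1"

	cmacme "github.com/jetstack/cert-manager/pkg/apis/acme/v1"
	cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
)

goimports sorts imports within each blank-line-separated group but preserves the groups themselves, so restoring the blank line keeps the two blocks ordered independently.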
@@ -84,6 +84,11 @@ public class AllDataFilesTable extends BaseMetadataTable {
this.fileSchema = fileSchema;
}
+ @Override
+ protected String tableType() {
+ return String.valueOf(MetadataTableType.ALL_DATA_FILES);
+ }
+
@Override
protected TableScan newRefinedScan(TableOperations ops, Table table, Schema schema, TableScanContext context) {
return new AllDataFilesTableScan(ops, table, schema, fileSchema, context); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.io.IOException;
import java.util.List;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.expressions.ResidualEvaluator;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.util.ParallelIterable;
import org.apache.iceberg.util.ThreadPools;
/**
* A {@link Table} implementation that exposes a table's valid data files as rows.
* <p>
* A valid data file is one that is readable from any snapshot currently tracked by the table.
* <p>
* This table may return duplicate rows.
*/
public class AllDataFilesTable extends BaseMetadataTable {
AllDataFilesTable(TableOperations ops, Table table) {
this(ops, table, table.name() + ".all_data_files");
}
AllDataFilesTable(TableOperations ops, Table table, String name) {
super(ops, table, name);
}
@Override
public TableScan newScan() {
return new AllDataFilesTableScan(operations(), table(), schema());
}
@Override
public Schema schema() {
Schema schema = new Schema(DataFile.getType(table().spec().partitionType()).fields());
if (table().spec().fields().size() < 1) {
// avoid returning an empty struct, which is not always supported. instead, drop the partition field (id 102)
return TypeUtil.selectNot(schema, Sets.newHashSet(102));
} else {
return schema;
}
}
@Override
MetadataTableType metadataTableType() {
return MetadataTableType.ALL_DATA_FILES;
}
public static class AllDataFilesTableScan extends BaseAllMetadataTableScan {
private final Schema fileSchema;
AllDataFilesTableScan(TableOperations ops, Table table, Schema fileSchema) {
super(ops, table, fileSchema);
this.fileSchema = fileSchema;
}
private AllDataFilesTableScan(TableOperations ops, Table table, Schema schema, Schema fileSchema,
TableScanContext context) {
super(ops, table, schema, context);
this.fileSchema = fileSchema;
}
@Override
protected TableScan newRefinedScan(TableOperations ops, Table table, Schema schema, TableScanContext context) {
return new AllDataFilesTableScan(ops, table, schema, fileSchema, context);
}
@Override
public TableScan useSnapshot(long scanSnapshotId) {
throw new UnsupportedOperationException("Cannot select snapshot: all_data_files is for all snapshots");
}
@Override
public TableScan asOfTime(long timestampMillis) {
throw new UnsupportedOperationException("Cannot select snapshot: all_data_files is for all snapshots");
}
@Override
public long targetSplitSize() {
return tableOps().current().propertyAsLong(
TableProperties.METADATA_SPLIT_SIZE, TableProperties.METADATA_SPLIT_SIZE_DEFAULT);
}
@Override
protected CloseableIterable<FileScanTask> planFiles(
TableOperations ops, Snapshot snapshot, Expression rowFilter,
boolean ignoreResiduals, boolean caseSensitive, boolean colStats) {
CloseableIterable<ManifestFile> manifests = allDataManifestFiles(ops.current().snapshots());
String schemaString = SchemaParser.toJson(schema());
String specString = PartitionSpecParser.toJson(PartitionSpec.unpartitioned());
Expression filter = ignoreResiduals ? Expressions.alwaysTrue() : rowFilter;
ResidualEvaluator residuals = ResidualEvaluator.unpartitioned(filter);
// Data tasks produce the table schema, not the projection schema and projection is done by processing engines.
// This data task needs to use the table schema, which may not include a partition schema to avoid having an
// empty struct in the schema for unpartitioned tables. Some engines, like Spark, can't handle empty structs in
// all cases.
return CloseableIterable.transform(manifests, manifest ->
new DataFilesTable.ManifestReadTask(ops.io(), manifest, fileSchema, schemaString, specString, residuals));
}
}
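/**
 * Returns the data manifests referenced by all of the given snapshots, read in parallel and
 * de-duplicated.
 */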
private static CloseableIterable<ManifestFile> allDataManifestFiles(List<Snapshot> snapshots) {
try (CloseableIterable<ManifestFile> iterable = new ParallelIterable<>(
Iterables.transform(snapshots, snapshot -> (Iterable<ManifestFile>) () -> snapshot.dataManifests().iterator()),
ThreadPools.getWorkerPool())) {
return CloseableIterable.withNoopClose(Sets.newHashSet(iterable));
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to close parallel iterable");
}
}
}
| 1 | 37,766 | I think these can all be .name() to get the value we are looking for | apache-iceberg | java |
@@ -2622,6 +2622,16 @@ class FunctionCallTest extends TestCase
array_push();',
'error_message' => 'TooFewArguments',
],
+ 'printOnlyString' => [
+ '<?php
+ print [];',
+ 'error_message' => 'InvalidArgument',
+ ],
+ 'printReturns1' => [
+ '<?php
+ (print "test") === 2;',
+ 'error_message' => 'TypeDoesNotContainType',
+ ],
];
}
} | 1 | <?php
namespace Psalm\Tests;
use const DIRECTORY_SEPARATOR;
class FunctionCallTest extends TestCase
{
use Traits\InvalidCodeAnalysisTestTrait;
use Traits\ValidCodeAnalysisTestTrait;
/**
* @return iterable<string,array{string,assertions?:array<string,string>,error_levels?:string[]}>
*/
public function providerValidCodeParse()
{
return [
'arrayFilter' => [
'<?php
$d = array_filter(["a" => 5, "b" => 12, "c" => null]);
$e = array_filter(
["a" => 5, "b" => 12, "c" => null],
function(?int $i): bool {
return true;
}
);',
'assertions' => [
'$d' => 'array<string, int>',
'$e' => 'array<string, int|null>',
],
],
'arrayFilterAdvanced' => [
'<?php
$f = array_filter(["a" => 5, "b" => 12, "c" => null], function(?int $val, string $key): bool {
return true;
}, ARRAY_FILTER_USE_BOTH);
$g = array_filter(["a" => 5, "b" => 12, "c" => null], function(string $val): bool {
return true;
}, ARRAY_FILTER_USE_KEY);
$bar = "bar";
$foo = [
$bar => function (): string {
return "baz";
},
];
$foo = array_filter(
$foo,
function (string $key): bool {
return $key === "bar";
},
ARRAY_FILTER_USE_KEY
);',
'assertions' => [
'$f' => 'array<string, int|null>',
'$g' => 'array<string, int|null>',
],
],
'arrayFilterIgnoreNullable' => [
'<?php
class A {
/**
* @return array<int, self|null>
*/
public function getRows() : array {
return [new self, null];
}
public function filter() : void {
$arr = array_filter(
static::getRows(),
function (self $row) : bool {
return is_a($row, static::class);
}
);
}
}',
'assertions' => [],
'error_levels' => ['PossiblyInvalidArgument'],
],
'arrayFilterAllowTrim' => [
'<?php
$foo = array_filter(["hello ", " "], "trim");',
],
'arrayFilterAllowNull' => [
'<?php
function foo() : array {
return array_filter(
array_map(
/** @return null */
function (int $arg) {
return null;
},
[1, 2, 3]
)
);
}',
],
'arrayFilterNamedFunction' => [
'<?php
/**
* @param array<int, DateTimeImmutable|null> $a
* @return array<int, DateTimeImmutable>
*/
function foo(array $a) : array {
return array_filter($a, "is_object");
}',
],
'typedArrayWithDefault' => [
'<?php
class A {}
/** @param array<A> $a */
function fooFoo(array $a = []): void {
}',
],
'abs' => [
'<?php
$a = abs(-5);
$b = abs(-7.5);
$c = $_GET["c"];
$c = is_numeric($c) ? abs($c) : null;',
'assertions' => [
'$a' => 'int',
'$b' => 'float',
'$c' => 'null|numeric',
],
'error_levels' => ['MixedAssignment', 'MixedArgument'],
],
'validDocblockParamDefault' => [
'<?php
/**
* @param int|false $p
* @return void
*/
function f($p = false) {}',
],
'byRefNewString' => [
'<?php
function fooFoo(?string &$v): void {}
fooFoo($a);',
],
'byRefVariableFunctionExistingArray' => [
'<?php
$arr = [];
function fooFoo(array &$v): void {}
$function = "fooFoo";
$function($arr);
if ($arr) {}',
],
'byRefProperty' => [
'<?php
class A {
/** @var string */
public $foo = "hello";
}
$a = new A();
function fooFoo(string &$v): void {}
fooFoo($a->foo);',
],
'namespaced' => [
'<?php
namespace A;
/** @return void */
function f(int $p) {}
f(5);',
],
'namespacedRootFunctionCall' => [
'<?php
namespace {
/** @return void */
function foo() { }
}
namespace A\B\C {
foo();
}',
],
'namespacedAliasedFunctionCall' => [
'<?php
namespace Aye {
/** @return void */
function foo() { }
}
namespace Bee {
use Aye as A;
A\foo();
}',
],
'arrayKeys' => [
'<?php
$a = array_keys(["a" => 1, "b" => 2]);',
'assertions' => [
'$a' => 'list<string>',
],
],
'arrayKeysMixed' => [
'<?php
/** @var array */
$b = ["a" => 5];
$a = array_keys($b);',
'assertions' => [
'$a' => 'list<array-key>',
],
'error_levels' => ['MixedArgument'],
],
'arrayValues' => [
'<?php
$b = array_values(["a" => 1, "b" => 2]);
$c = array_values(["a" => "hello", "b" => "jello"]);',
'assertions' => [
'$b' => 'non-empty-list<int>',
'$c' => 'non-empty-list<string>',
],
],
'arrayCombine' => [
'<?php
$c = array_combine(["a", "b", "c"], [1, 2, 3]);',
'assertions' => [
'$c' => 'array<string, int>|false',
],
],
'arrayCombineFalse' => [
'<?php
$c = array_combine(["a", "b"], [1, 2, 3]);',
'assertions' => [
'$c' => 'array<string, int>|false',
],
],
'arrayMerge' => [
'<?php
$d = array_merge(["a", "b", "c"], [1, 2, 3]);',
'assertions' => [
'$d' => 'array{0: string, 1: string, 2: string, 3: int, 4: int, 5: int}',
],
],
'arrayMergeListResult' => [
'<?php
/**
* @param list<string> $list
* @return list<string>
*/
function foo(array $list) : array {
return array_merge($list, ["test"]);
}
/**
* @param array<int, string> $list
* @return list<string>
*/
function bar(array $list) : array {
return array_merge($list, ["test"]);
}',
],
'arrayReverseDontPreserveKey' => [
'<?php
$d = array_reverse(["a", "b", 1, "d" => 4]);',
'assertions' => [
'$d' => 'non-empty-array<int|string, int|string>',
],
],
'arrayReverseDontPreserveKeyExplicitArg' => [
'<?php
$d = array_reverse(["a", "b", 1, "d" => 4], false);',
'assertions' => [
'$d' => 'non-empty-array<int|string, int|string>',
],
],
'arrayReversePreserveKey' => [
'<?php
$d = array_reverse(["a", "b", 1], true);',
'assertions' => [
'$d' => 'non-empty-array<int, int|string>',
],
],
'arrayDiff' => [
'<?php
$d = array_diff(["a" => 5, "b" => 12], [5]);',
'assertions' => [
'$d' => 'array<string, int>',
],
],
'arrayDiffIsVariadic' => [
'<?php
array_diff([], [], [], [], []);',
'assertions' => [],
],
'arrayDiffKeyIsVariadic' => [
'<?php
array_diff_key([], [], [], [], []);',
'assertions' => [],
],
'arrayPopMixed' => [
'<?php
/** @var mixed */
$b = ["a" => 5, "c" => 6];
$a = array_pop($b);',
'assertions' => [
'$a' => 'mixed',
'$b' => 'mixed',
],
'error_levels' => ['MixedAssignment', 'MixedArgument'],
],
'arrayPopNonEmpty' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if ($a) {
$b = array_pop($a);
}
$c = array_pop($a);',
'assertions' => [
'$b' => 'int',
'$c' => 'int|null',
],
],
'arrayPopNonEmptyAfterIsset' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (isset($a["a"])) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterCount' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (count($a)) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayShiftNonEmptyList' => [
'<?php
/** @param non-empty-list $arr */
function type_of_array_shift(array $arr) : int {
if (\is_int($arr[0])) {
return \array_shift($arr);
}
return 0;
}',
],
'noRedundantConditionAfterArrayObjectCountCheck' => [
'<?php
/** @var ArrayObject<int, int> */
$a = [];
$b = 5;
if (count($a)) {}',
],
'noRedundantConditionAfterMixedOrEmptyArrayCountCheck' => [
'<?php
function foo(string $s) : void {
$a = json_decode($s) ?: [];
if (count($a)) {}
if (!count($a)) {}
}',
'assertions' => [],
'error_levels' => ['MixedAssignment', 'MixedArgument'],
],
'objectLikeArrayAssignmentInConditional' => [
'<?php
$a = [];
if (rand(0, 1)) {
$a["a"] = 5;
}
if (count($a)) {}
if (!count($a)) {}',
],
'noRedundantConditionAfterCheckingExplodeLength' => [
'<?php
/** @var string */
$s = "hello";
$segments = explode(".", $s);
if (count($segments) === 1) {}',
],
'arrayPopNonEmptyAfterCountEqualsOne' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (count($a) === 1) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterCountSoftEqualsOne' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (count($a) == 1) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterCountGreaterThanZero' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (count($a) > 0) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterCountGreaterOrEqualsOne' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (count($a) >= 1) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterCountEqualsOneReversed' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (1 === count($a)) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterCountSoftEqualsOneReversed' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (1 == count($a)) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterCountGreaterThanZeroReversed' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (0 < count($a)) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterCountGreaterOrEqualToOneReversed' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$b = 5;
if (1 <= count($a)) {
$b = array_pop($a);
}',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterThreeAssertions' => [
'<?php
class A {}
class B extends A {
/** @var array<int, string> */
public $arr = [];
}
/** @var array<A> */
$replacement_stmts = [];
if (!$replacement_stmts
|| !$replacement_stmts[0] instanceof B
|| count($replacement_stmts[0]->arr) > 1
) {
return null;
}
$b = $replacement_stmts[0]->arr;',
'assertions' => [
'$b' => 'array<int, string>',
],
],
'arrayPopNonEmptyAfterArrayAddition' => [
'<?php
/** @var array<string, int> */
$a = ["a" => 5, "b" => 6, "c" => 7];
$a["foo"] = 10;
$b = array_pop($a);',
'assertions' => [
'$b' => 'int',
],
],
'arrayPopNonEmptyAfterMixedArrayAddition' => [
'<?php
/** @var array */
$a = ["a" => 5, "b" => 6, "c" => 7];
$a[] = "hello";
$b = array_pop($a);',
'assertions' => [
'$b' => 'mixed|string',
],
'error_levels' => [
'MixedAssignment',
],
],
'countMoreThan0CanBeInverted' => [
'<?php
$a = [];
if (rand(0, 1)) {
$a[] = "hello";
}
if (count($a) > 0) {
exit;
}',
'assertions' => [
'$a' => 'array<empty, empty>',
],
],
'uasort' => [
'<?php
uasort(
$manifest,
function ($a, $b) {
return strcmp($a["parent"],$b["parent"]);
}
);',
'assertions' => [],
'error_levels' => [
'MixedArrayAccess',
'MixedArgument',
'MissingClosureParamType',
'MissingClosureReturnType',
],
],
'byRefAfterCallable' => [
'<?php
/**
* @param callable $callback
* @return void
*/
function route($callback) {
if (!is_callable($callback)) { }
$a = preg_match("", "", $b);
if ($b[0]) {}
}',
'assertions' => [],
'error_levels' => [
'MixedAssignment',
'MixedArrayAccess',
'RedundantConditionGivenDocblockType',
],
],
'ignoreNullablePregReplace' => [
'<?php
function foo(string $s): string {
$s = preg_replace("/hello/", "", $s);
if ($s === null) {
return "hello";
}
return $s;
}
function bar(string $s): string {
$s = preg_replace("/hello/", "", $s);
return $s;
}
function bat(string $s): ?string {
$s = preg_replace("/hello/", "", $s);
return $s;
}',
],
'extractVarCheck' => [
'<?php
function takesString(string $str): void {}
$foo = null;
$a = ["$foo" => "bar"];
extract($a);
takesString($foo);',
'assertions' => [],
'error_levels' => [
'MixedAssignment',
'MixedArrayAccess',
'MixedArgument',
],
],
'arrayMergeObjectLike' => [
'<?php
/**
* @param array<string, int> $a
* @return array<string, int>
*/
function foo($a)
{
return $a;
}
$a1 = ["hi" => 3];
$a2 = ["bye" => 5];
$a3 = array_merge($a1, $a2);
foo($a3);',
'assertions' => [
'$a3' => 'array{bye: int, hi: int}',
],
],
'arrayRand' => [
'<?php
$vars = ["x" => "a", "y" => "b"];
$c = array_rand($vars);
$d = $vars[$c];
$more_vars = ["a", "b"];
$e = array_rand($more_vars);',
'assertions' => [
'$vars' => 'array{x: string, y: string}',
'$c' => 'string',
'$d' => 'string',
'$more_vars' => 'array{0: string, 1: string}',
'$e' => 'int',
],
],
'arrayRandMultiple' => [
'<?php
$vars = ["x" => "a", "y" => "b"];
$b = 3;
$c = array_rand($vars, 1);
$d = array_rand($vars, 2);
$e = array_rand($vars, 3);
$f = array_rand($vars, $b);',
'assertions' => [
'$vars' => 'array{x: string, y: string}',
'$c' => 'string',
'$e' => 'list<string>',
'$f' => 'list<string>|string',
],
],
'arrayKeysNoEmpty' => [
'<?php
function expect_string(string $x): void {
echo $x;
}
function test(): void {
foreach (array_keys([]) as $key) {
expect_string($key);
}
}',
'assertions' => [],
'error_levels' => ['MixedAssignment', 'MixedArgument', 'MixedArgumentTypeCoercion'],
],
'compact' => [
'<?php
function test(): array {
return compact(["val"]);
}',
],
'objectLikeKeyChecksAgainstGeneric' => [
'<?php
/**
* @param array<string, string> $b
*/
function a($b): string
{
return $b["a"];
}
a(["a" => "hello"]);',
],
'objectLikeKeyChecksAgainstObjectLike' => [
'<?php
/**
* @param array{a: string} $b
*/
function a($b): string
{
return $b["a"];
}
a(["a" => "hello"]);',
],
'getenv' => [
'<?php
$a = getenv();
$b = getenv("some_key");',
'assertions' => [
'$a' => 'array<array-key, string>',
'$b' => 'false|string',
],
],
'arrayPopNotNullable' => [
'<?php
function expectsInt(int $a) : void {}
/**
* @param array<array-key, array{item:int}> $list
*/
function test(array $list) : void
{
while (!empty($list)) {
$tmp = array_pop($list);
expectsInt($tmp["item"]);
}
}',
],
'arrayFilterWithAssert' => [
'<?php
$a = array_filter(
[1, "hello", 6, "goodbye"],
function ($s): bool {
return is_string($s);
}
);',
'assertions' => [
'$a' => 'array<int, string>',
],
'error_levels' => [
'MissingClosureParamType',
],
],
'arrayFilterUseKey' => [
'<?php
$bar = "bar";
$foo = [
$bar => function (): string {
return "baz";
},
];
$foo = array_filter(
$foo,
function (string $key): bool {
return $key === "bar";
},
ARRAY_FILTER_USE_KEY
);',
'assertions' => [
'$foo' => 'array<string, Closure():string(baz)>',
],
],
'ignoreFalsableCurrent' => [
'<?php
/** @param string[] $arr */
function foo(array $arr): string {
return current($arr);
}
/** @param string[] $arr */
function bar(array $arr): string {
$a = current($arr);
if ($a === false) {
return "hello";
}
return $a;
}
/**
* @param string[] $arr
* @return false|string
*/
function bat(array $arr) {
return current($arr);
}',
],
'ignoreFalsableFileGetContents' => [
'<?php
function foo(string $s): string {
return file_get_contents($s);
}
function bar(string $s): string {
$a = file_get_contents($s);
if ($a === false) {
return "hello";
}
return $a;
}
/**
* @return false|string
*/
function bat(string $s) {
return file_get_contents($s);
}',
],
'arraySumEmpty' => [
'<?php
$foo = array_sum([]) + 1;',
'assertions' => [
'$foo' => 'float|int',
],
],
'arrayMapObjectLikeAndCallable' => [
'<?php
/**
* @psalm-return array{key1:int,key2:int}
*/
function foo(): array {
$v = ["key1"=> 1, "key2"=> "2"];
$r = array_map("intval", $v);
return $r;
}',
],
'arrayMapObjectLikeAndClosure' => [
'<?php
/**
* @psalm-return array{key1:int,key2:int}
*/
function foo(): array {
$v = ["key1"=> 1, "key2"=> "2"];
$r = array_map(function($i) : int { return intval($i);}, $v);
return $r;
}',
'assertions' => [],
'error_levels' => [
'MissingClosureParamType',
'MixedTypeCoercion',
],
],
'arrayFilterGoodArgs' => [
'<?php
function fooFoo(int $i) : bool {
return true;
}
class A {
public static function barBar(int $i) : bool {
return true;
}
}
array_filter([1, 2, 3], "fooFoo");
array_filter([1, 2, 3], "foofoo");
array_filter([1, 2, 3], "FOOFOO");
array_filter([1, 2, 3], "A::barBar");
array_filter([1, 2, 3], "A::BARBAR");
array_filter([1, 2, 3], "A::barbar");',
],
'arrayFilterIgnoreMissingClass' => [
'<?php
array_filter([1, 2, 3], "A::bar");',
'assertions' => [],
'error_levels' => ['UndefinedClass'],
],
'arrayFilterIgnoreMissingMethod' => [
'<?php
class A {
public static function bar(int $i) : bool {
return true;
}
}
array_filter([1, 2, 3], "A::foo");',
'assertions' => [],
'error_levels' => ['UndefinedMethod'],
],
'validCallables' => [
'<?php
class A {
public static function b() : void {}
}
function c() : void {}
["a", "b"]();
"A::b"();
"c"();',
],
'arrayMapParamDefault' => [
'<?php
$arr = ["a", "b"];
array_map("mapdef", $arr, array_fill(0, count($arr), 1));
function mapdef(string $_a, int $_b = 0): string {
return "a";
}',
],
'noInvalidOperandForCoreFunctions' => [
'<?php
function foo(string $a, string $b) : int {
$aTime = strtotime($a);
$bTime = strtotime($b);
return $aTime - $bTime;
}',
],
'strposIntSecondParam' => [
'<?php
function hasZeroByteOffset(string $s) : bool {
return strpos($s, 0) !== false;
}',
],
'functionCallInGlobalScope' => [
'<?php
$a = function() use ($argv) : void {};',
],
'implodeMultiDimensionalArray' => [
'<?php
$urls = array_map("implode", [["a", "b"]]);',
],
'varExport' => [
'<?php
$a = var_export(["a"], true);',
'assertions' => [
'$a' => 'string',
],
],
'varExportConstFetch' => [
'<?php
class Foo {
const BOOL_VAR_EXPORT_RETURN = true;
/**
* @param mixed $mixed
*/
public static function Baz($mixed) : string {
return var_export($mixed, self::BOOL_VAR_EXPORT_RETURN);
}
}',
],
'key' => [
'<?php
$a = ["one" => 1, "two" => 3];
$b = key($a);
$c = $a[$b];',
'assertions' => [
'$b' => 'null|string',
'$c' => 'int',
],
],
'explode' => [
'<?php
/** @var string $string */
$elements = explode(" ", $string);',
'assertions' => [
'$elements' => 'non-empty-list<string>',
],
],
'explodeWithPositiveLimit' => [
'<?php
/** @var string $string */
$elements = explode(" ", $string, 5);',
'assertions' => [
'$elements' => 'non-empty-list<string>',
],
],
'explodeWithNegativeLimit' => [
'<?php
/** @var string $string */
$elements = explode(" ", $string, -5);',
'assertions' => [
'$elements' => 'list<string>',
],
],
'explodeWithDynamicLimit' => [
'<?php
/**
* @var string $string
* @var int $limit
*/
$elements = explode(" ", $string, $limit);',
'assertions' => [
'$elements' => 'list<string>',
],
],
'explodeWithDynamicDelimiter' => [
'<?php
/**
* @var string $delim
* @var string $string
*/
$elements = explode($delim, $string);',
'assertions' => [
'$elements' => 'false|non-empty-list<string>',
],
],
'explodeWithDynamicDelimiterAndPositiveLimit' => [
'<?php
/**
* @var string $delim
* @var string $string
*/
$elements = explode($delim, $string, 5);',
'assertions' => [
'$elements' => 'false|non-empty-list<string>',
],
],
'explodeWithDynamicDelimiterAndNegativeLimit' => [
'<?php
/**
* @var string $delim
* @var string $string
*/
$elements = explode($delim, $string, -5);',
'assertions' => [
'$elements' => 'false|list<string>',
],
],
'explodeWithDynamicDelimiterAndLimit' => [
'<?php
/**
* @var string $delim
* @var string $string
* @var int $limit
*/
$elements = explode($delim, $string, $limit);',
'assertions' => [
'$elements' => 'false|list<string>',
],
],
'explodeWithPossiblyFalse' => [
'<?php
/** @return non-empty-list<string> */
function exploder(string $d, string $s) : array {
return explode($d, $s);
}',
],
'allowPossiblyUndefinedClassInClassExists' => [
'<?php
if (class_exists(Foo::class)) {}',
],
'allowConstructorAfterClassExists' => [
'<?php
function foo(string $s) : void {
if (class_exists($s)) {
new $s();
}
}',
'assertions' => [],
'error_levels' => ['MixedMethodCall'],
],
'next' => [
'<?php
$arr = ["one", "two", "three"];
$n = next($arr);',
'assertions' => [
'$n' => 'false|string',
],
],
'iteratorToArray' => [
'<?php
/**
* @return Generator<stdClass>
*/
function generator(): Generator {
yield new stdClass;
}
$a = iterator_to_array(generator());',
'assertions' => [
'$a' => 'array<mixed, stdClass>',
],
],
'iteratorToArrayWithGetIterator' => [
'<?php
class C implements IteratorAggregate {
/**
* @return Traversable<int,string>
*/
public function getIterator() {
yield 1 => "1";
}
}
$a = iterator_to_array(new C);',
'assertions' => [
'$a' => 'array<int, string>',
],
],
'iteratorToArrayWithGetIteratorReturningList' => [
'<?php
class C implements IteratorAggregate {
/**
* @return Traversable<int,string>
*/
public function getIterator() {
yield 1 => "1";
}
}
$a = iterator_to_array(new C, false);',
'assertions' => [
'$a' => 'list<string>',
],
],
'arrayColumnInference' => [
'<?php
function makeMixedArray(): array { return []; }
/** @return array<array<int,bool>> */
function makeGenericArray(): array { return []; }
/** @return array<array{0:string}> */
function makeShapeArray(): array { return []; }
/** @return array<array{0:string}|int> */
function makeUnionArray(): array { return []; }
$a = array_column([[1], [2], [3]], 0);
$b = array_column([["a" => 1], ["a" => 2], ["a" => 3]], "a");
$c = array_column([["k" => "a", "v" => 1], ["k" => "b", "v" => 2]], "v", "k");
$d = array_column([], 0);
$e = array_column(makeMixedArray(), 0);
$f = array_column(makeGenericArray(), 0);
$g = array_column(makeShapeArray(), 0);
$h = array_column(makeUnionArray(), 0);
',
'assertions' => [
'$a' => 'array<array-key, int>',
'$b' => 'array<array-key, int>',
'$c' => 'array<string, int>',
'$d' => 'array<array-key, mixed>',
'$e' => 'array<array-key, mixed>',
'$f' => 'array<array-key, mixed>',
'$g' => 'array<array-key, string>',
'$h' => 'array<array-key, mixed>',
],
],
'strtrWithPossiblyFalseFirstArg' => [
'<?php
/**
* @param false|string $str
* @param array<string, string> $replace_pairs
* @return string
*/
function strtr_wrapper($str, array $replace_pairs) {
/** @psalm-suppress PossiblyFalseArgument */
return strtr($str, $replace_pairs);
}',
],
'splatArrayIntersect' => [
'<?php
$foo = [
[1, 2, 3],
[1, 2],
];
$bar = array_intersect(... $foo);',
'assertions' => [
'$bar' => 'array<int, int>',
],
],
'arrayIntersectIsVariadic' => [
'<?php
array_intersect([], [], [], [], []);',
'assertions' => [],
],
'arrayIntersectKeyIsVariadic' => [
'<?php
array_intersect_key([], [], [], [], []);',
'assertions' => [],
],
'arrayReduce' => [
'<?php
$arr = [2, 3, 4, 5];
function multiply (int $carry, int $item) : int {
return $carry * $item;
}
$f2 = function (int $carry, int $item) : int {
return $carry * $item;
};
$direct_closure_result = array_reduce(
$arr,
function (int $carry, int $item) : int {
return $carry * $item;
},
1
);
$passed_closure_result = array_reduce(
$arr,
$f2,
1
);
$function_call_result = array_reduce(
$arr,
"multiply",
1
);',
'assertions' => [
'$direct_closure_result' => 'int',
'$passed_closure_result' => 'int',
'$function_call_result' => 'int',
],
],
'arrayReduceMixedReturn' => [
'<?php
$arr = [2, 3, 4, 5];
$direct_closure_result = array_reduce(
$arr,
function (int $carry, int $item) {
return $_GET["boo"];
},
1
);',
'assertions' => [],
'error_levels' => ['MissingClosureReturnType', 'MixedAssignment'],
],
'versionCompare' => [
'<?php
function getString() : string {
return rand(0, 1) ? "===" : "==";
}
$a = version_compare("5.0.0", "7.0.0");
$b = version_compare("5.0.0", "7.0.0", "==");
$c = version_compare("5.0.0", "7.0.0", getString());
',
'assertions' => [
'$a' => 'int',
'$b' => 'bool',
'$c' => 'bool|null',
],
],
'getTimeOfDay' => [
'<?php
$a = gettimeofday(true) - gettimeofday(true);
$b = gettimeofday();
$c = gettimeofday(false);',
'assertions' => [
'$a' => 'float',
'$b' => 'array<string, int>',
'$c' => 'array<string, int>',
],
],
'parseUrlArray' => [
'<?php
function foo(string $s) : string {
return parse_url($s)["host"] ?? "";
}
function bar(string $s) : string {
$parsed = parse_url($s);
return $parsed["host"];
}
function baz(string $s) : string {
$parsed = parse_url($s);
return $parsed["host"];
}
function bag(string $s) : string {
$parsed = parse_url($s);
if (is_string($parsed["host"] ?? false)) {
return $parsed["host"];
}
return "";
}
function hereisanotherone(string $s) : string {
$parsed = parse_url($s);
if (isset($parsed["host"]) && is_string($parsed["host"])) {
return $parsed["host"];
}
return "";
}
function hereisthelastone(string $s) : string {
$parsed = parse_url($s);
if (isset($parsed["host"]) && is_string($parsed["host"])) {
return $parsed["host"];
}
return "";
}
function portisint(string $s) : int {
$parsed = parse_url($s);
if (isset($parsed["port"])) {
return $parsed["port"];
}
return 80;
}
function portismaybeint(string $s) : ?int {
$parsed = parse_url($s);
return $parsed["port"] ?? null;
}
$porta = parse_url("", PHP_URL_PORT);
$porte = parse_url("localhost:443", PHP_URL_PORT);',
'assertions' => [
'$porta' => 'int|null',
'$porte' => 'int|null',
],
'error_levels' => ['MixedReturnStatement', 'MixedInferredReturnType'],
],
'parseUrlComponent' => [
'<?php
function foo(string $s) : string {
return parse_url($s, PHP_URL_HOST) ?? "";
}
function bar(string $s) : string {
return parse_url($s, PHP_URL_HOST);
}
function bag(string $s) : string {
$host = parse_url($s, PHP_URL_HOST);
if (is_string($host)) {
return $host;
}
return "";
}',
],
'triggerUserError' => [
'<?php
function mightLeave() : string {
if (rand(0, 1)) {
trigger_error("bad", E_USER_ERROR);
} else {
return "here";
}
}',
],
'getParentClass' => [
'<?php
class A {}
class B extends A {}
$b = get_parent_class(new A());
if ($b === false) {}
$c = new $b();',
'assertions' => [],
'error_levels' => ['MixedMethodCall'],
],
'arraySplice' => [
'<?php
$a = [1, 2, 3];
$c = $a;
$b = ["a", "b", "c"];
array_splice($a, -1, 1, $b);
$d = [1, 2, 3];
array_splice($d, -1, 1);',
'assertions' => [
'$a' => 'non-empty-array<int, int|string>',
'$b' => 'array{0: string, 1: string, 2: string}',
'$c' => 'array{0: int, 1: int, 2: int}',
],
],
'arraySpliceOtherType' => [
'<?php
$d = [["red"], ["green"], ["blue"]];
array_splice($d, -1, 1, "foo");',
'assertions' => [
'$d' => 'array<int, array{0: string}|string>',
],
],
'ksortPreserveShape' => [
'<?php
$a = ["a" => 3, "b" => 4];
ksort($a);
acceptsAShape($a);
/**
* @param array{a:int,b:int} $a
*/
function acceptsAShape(array $a): void {}',
],
'suppressError' => [
'<?php
$a = @file_get_contents("foo");',
'assertions' => [
'$a' => 'false|string',
],
],
'arraySlicePreserveKeys' => [
'<?php
$a = ["a" => 1, "b" => 2, "c" => 3];
$b = array_slice($a, 1, 2, true);
$c = array_slice($a, 1, 2, false);
$d = array_slice($a, 1, 2);',
'assertions' => [
'$b' => 'array<string, int>',
'$c' => 'array<string, int>',
'$d' => 'array<string, int>',
],
],
'arraySliceDontPreserveIntKeys' => [
'<?php
$a = [1 => "a", 4 => "b", 3 => "c"];
$b = array_slice($a, 1, 2, true);
$c = array_slice($a, 1, 2, false);
$d = array_slice($a, 1, 2);',
'assertions' => [
'$b' => 'array<int, string>',
'$c' => 'list<string>',
'$d' => 'list<string>',
],
],
'printrOutput' => [
'<?php
function foo(string $s) : void {
echo $s;
}
foo(print_r(1, true));',
],
'microtime' => [
'<?php
$a = microtime(true);
$b = microtime();
/** @psalm-suppress InvalidScalarArgument */
$c = microtime(1);
$d = microtime(false);',
'assertions' => [
'$a' => 'float',
'$b' => 'string',
'$c' => 'float|string',
'$d' => 'string',
],
],
'filterVar' => [
'<?php
function filterInt(string $s) : int {
$filtered = filter_var($s, FILTER_VALIDATE_INT);
if ($filtered === false) {
return 0;
}
return $filtered;
}
function filterNullableInt(string $s) : ?int {
return filter_var($s, FILTER_VALIDATE_INT, ["options" => ["default" => null]]);
}
function filterIntWithDefault(string $s) : int {
return filter_var($s, FILTER_VALIDATE_INT, ["options" => ["default" => 5]]);
}
function filterBool(string $s) : bool {
return filter_var($s, FILTER_VALIDATE_BOOLEAN);
}
function filterNullableBool(string $s) : ?bool {
return filter_var($s, FILTER_VALIDATE_BOOLEAN, FILTER_NULL_ON_FAILURE);
}
function filterNullableBoolWithFlagsArray(string $s) : ?bool {
return filter_var($s, FILTER_VALIDATE_BOOLEAN, ["flags" => FILTER_NULL_ON_FAILURE]);
}
function filterFloat(string $s) : float {
$filtered = filter_var($s, FILTER_VALIDATE_FLOAT);
if ($filtered === false) {
return 0.0;
}
return $filtered;
}
function filterFloatWithDefault(string $s) : float {
return filter_var($s, FILTER_VALIDATE_FLOAT, ["options" => ["default" => 5.0]]);
}',
],
'callVariableVar' => [
'<?php
class Foo
{
public static function someInt(): int
{
return 1;
}
}
/**
* @return int
*/
function makeInt()
{
$fooClass = Foo::class;
return $fooClass::someInt();
}',
],
'expectsIterable' => [
'<?php
function foo(iterable $i) : void {}
function bar(array $a) : void {
foo($a);
}',
],
'getTypeHasValues' => [
'<?php
/**
* @param mixed $maybe
*/
function matchesTypes($maybe) : void {
$t = gettype($maybe);
if ($t === "object") {}
}',
],
'functionResolutionInNamespace' => [
'<?php
namespace Foo;
function sort(int $_) : void {}
sort(5);',
],
'rangeWithIntStep' => [
'<?php
function foo(int $bar) : string {
return (string) $bar;
}
foreach (range(1, 10, 1) as $x) {
foo($x);
}',
],
'rangeWithNoStep' => [
'<?php
function foo(int $bar) : string {
return (string) $bar;
}
foreach (range(1, 10) as $x) {
foo($x);
}',
],
'rangeWithNoStepAndString' => [
'<?php
function foo(string $bar) : void {}
foreach (range("a", "z") as $x) {
foo($x);
}',
],
'rangeWithFloatStep' => [
'<?php
function foo(float $bar) : string {
return (string) $bar;
}
foreach (range(1, 10, .3) as $x) {
foo($x);
}',
],
'rangeWithFloatStart' => [
'<?php
function foo(float $bar) : string {
return (string) $bar;
}
foreach (range(1.5, 10) as $x) {
foo($x);
}',
],
'duplicateNamespacedFunction' => [
'<?php
namespace Bar;
function sort() : void {}',
],
'arrayMapAfterFunctionMissingFile' => [
'<?php
require_once(FOO);
$urls = array_map("strval", [1, 2, 3]);',
[],
'error_levels' => ['UndefinedConstant', 'UnresolvableInclude'],
],
'noNamespaceClash' => [
'<?php
namespace FunctionNamespace {
function foo() : void {}
}
namespace ClassNamespace {
class Foo {}
}
namespace {
use ClassNamespace\Foo;
use function FunctionNamespace\foo;
new Foo();
foo();
}',
],
'round' => [
'<?php
$a = round(4.6);
$b = round(3.6, 0);
$c = round(3.0, 1);
$d = round(3.1, 2);
/** @var int */
$sig = 1;
$e = round(3.1, $sig);',
'assertions' => [
'$a' => 'int',
'$b' => 'int',
'$c' => 'float',
'$d' => 'float',
'$e' => 'float|int',
],
],
'hashInit70' => [
'<?php
$h = hash_init("sha256");',
[
'$h' => 'resource',
],
[],
                    '7.0',
],
'hashInit71' => [
'<?php
$h = hash_init("sha256");',
[
'$h' => 'resource',
],
[],
'7.1',
],
'hashInit72' => [
'<?php
$h = hash_init("sha256");',
[
'$h' => 'HashContext',
],
[],
'7.2',
],
'hashInit73' => [
'<?php
$h = hash_init("sha256");',
[
'$h' => 'HashContext',
],
[],
'7.3',
],
'nullableByRef' => [
'<?php
function foo(?string &$s) : void {}
function bar() : void {
foo($bar);
}',
],
'getClassNewInstance' => [
'<?php
interface I {}
class C implements I {}
class Props {
/** @var class-string<I>[] */
public $arr = [];
}
(new Props)->arr[] = get_class(new C);',
],
'getClassVariable' => [
'<?php
interface I {}
class C implements I {}
$c_instance = new C;
class Props {
/** @var class-string<I>[] */
public $arr = [];
}
(new Props)->arr[] = get_class($c_instance);',
],
'getClassAnonymousNewInstance' => [
'<?php
interface I {}
class Props {
/** @var class-string<I>[] */
public $arr = [];
}
(new Props)->arr[] = get_class(new class implements I{});',
],
'getClassAnonymousVariable' => [
'<?php
interface I {}
$anon_instance = new class implements I {};
class Props {
/** @var class-string<I>[] */
public $arr = [];
}
(new Props)->arr[] = get_class($anon_instance);',
],
'arrayReversePreserveNonEmptiness' => [
'<?php
/** @param string[] $arr */
function getOrderings(array $arr): int {
if ($arr) {
$next = null;
foreach (array_reverse($arr) as $v) {
$next = 1;
}
return $next;
}
return 2;
}',
],
'mktime' => [
'<?php
/** @psalm-suppress InvalidScalarArgument */
$a = mktime("foo");
/** @psalm-suppress MixedArgument */
$b = mktime($_GET["foo"]);
$c = mktime(1, 2, 3);',
'assertions' => [
'$a' => 'false|int',
'$b' => 'false|int',
'$c' => 'int',
],
],
'PHP73-hrtime' => [
'<?php
$a = hrtime(true);
$b = hrtime();
/** @psalm-suppress InvalidScalarArgument */
$c = hrtime(1);
$d = hrtime(false);',
'assertions' => [
'$a' => 'int',
'$b' => 'array{0: int, 1: int}',
'$c' => 'array{0: int, 1: int}|int',
'$d' => 'array{0: int, 1: int}',
],
],
'PHP73-hrtimeCanBeFloat' => [
'<?php
$a = hrtime(true);
if (is_int($a)) {}
if (is_float($a)) {}',
],
'min' => [
'<?php
$a = min(0, 1);
$b = min([0, 1]);
$c = min("a", "b");
$d = min(1, 2, 3, 4);
$e = min(1, 2, 3, 4, 5);
$f = min(...[1, 2, 3]);',
'assertions' => [
'$a' => 'int',
'$b' => 'int',
'$c' => 'string',
'$d' => 'int',
'$e' => 'int',
'$f' => 'int',
],
],
'minUnpackedArg' => [
'<?php
$f = min(...[1, 2, 3]);',
'assertions' => [
'$f' => 'int',
],
],
'sscanf' => [
'<?php
sscanf("10:05:03", "%d:%d:%d", $hours, $minutes, $seconds);',
'assertions' => [
'$hours' => 'float|int|string',
'$minutes' => 'float|int|string',
'$seconds' => 'float|int|string',
],
],
'inferArrayMapReturnType' => [
'<?php
/** @return array<string> */
function Foo(DateTime ...$dateTimes) : array {
return array_map(
function ($dateTime) {
return (string) ($dateTime->format("c"));
},
$dateTimes
);
}',
],
'noImplicitAssignmentToStringFromMixedWithDocblockTypes' => [
'<?php
/** @param string $s */
function takesString($s) : void {}
function takesInt(int $i) : void {}
/**
* @param mixed $s
* @psalm-suppress MixedArgument
*/
function bar($s) : void {
takesString($s);
takesInt($s);
}',
],
'ignoreNullableIssuesAfterMixedCoercion' => [
'<?php
function takesNullableString(?string $s) : void {}
function takesString(string $s) : void {}
/**
* @param mixed $s
* @psalm-suppress MixedArgument
*/
function bar($s) : void {
takesNullableString($s);
takesString($s);
}',
],
'countableSimpleXmlElement' => [
'<?php
$xml = new SimpleXMLElement("<?xml version=\"1.0\"?><a><b></b><b></b></a>");
echo count($xml);',
],
'countableCallableArray' => [
'<?php
/** @param callable|false $x */
function example($x) : void {
if (is_array($x)) {
echo "Count is: " . count($x);
}
}'
],
'refineWithTraitExists' => [
'<?php
function foo(string $s) : void {
if (trait_exists($s)) {
new ReflectionClass($s);
}
}',
],
'refineWithClassExistsOrTraitExists' => [
'<?php
function foo(string $s) : void {
if (trait_exists($s) || class_exists($s)) {
new ReflectionClass($s);
}
}
function bar(string $s) : void {
if (class_exists($s) || trait_exists($s)) {
new ReflectionClass($s);
}
}
function baz(string $s) : void {
if (class_exists($s) || interface_exists($s) || trait_exists($s)) {
new ReflectionClass($s);
}
}',
],
'minSingleArg' => [
'<?php
/** @psalm-suppress TooFewArguments */
min(0);',
],
'PHP73-allowIsCountableToInformType' => [
'<?php
function getObject() : iterable{
return [];
}
$iterableObject = getObject();
if (is_countable($iterableObject)) {
if (count($iterableObject) === 0) {}
}',
],
'versionCompareAsCallable' => [
'<?php
$a = ["1.0", "2.0"];
uksort($a, "version_compare");',
],
'coerceToObjectAfterBeingCalled' => [
'<?php
class Foo {
public function bar() : void {}
}
function takesFoo(Foo $foo) : void {}
/** @param mixed $f */
function takesMixed($f) : void {
if (rand(0, 1)) {
$f = new Foo();
}
/** @psalm-suppress MixedArgument */
takesFoo($f);
$f->bar();
}',
],
'functionExists' => [
'<?php
if (!function_exists("in_array")) {
function in_array($a, $b) {
return true;
}
}',
],
'pregMatch' => [
'<?php
function takesInt(int $i) : void {}
takesInt(preg_match("{foo}", "foo"));',
],
'pregMatchWithMatches' => [
'<?php
/** @param string[] $matches */
function takesMatches(array $matches) : void {}
preg_match("{foo}", "foo", $matches);
takesMatches($matches);',
],
'pregMatchWithOffset' => [
'<?php
/** @param string[] $matches */
function takesMatches(array $matches) : void {}
preg_match("{foo}", "foo", $matches, 0, 10);
takesMatches($matches);',
],
'pregMatchWithFlags' => [
'<?php
function takesInt(int $i) : void {}
if (preg_match("{foo}", "this is foo", $matches, PREG_OFFSET_CAPTURE)) {
/**
* @psalm-suppress MixedArrayAccess
* @psalm-suppress MixedArgument
*/
takesInt($matches[0][1]);
}',
],
'pregReplaceCallback' => [
'<?php
function foo(string $s) : string {
return preg_replace_callback(
\'/<files (psalm-version="[^"]+") (?:php-version="(.+)">\n)/\',
/** @param array<int, string> $matches */
function (array $matches) : string {
return $matches[1];
},
$s
);
}',
],
'compactDefinedVariable' => [
'<?php
function foo(int $a, string $b, bool $c) : array {
return compact("a", "b", "c");
}',
],
'PHP73-setCookiePhp73' => [
'<?php
setcookie(
"name",
"value",
[
"path" => "/",
"expires" => 0,
"httponly" => true,
"secure" => true,
"samesite" => "Lax"
]
);',
],
'printrBadArg' => [
'<?php
/** @psalm-suppress InvalidScalarArgument */
$a = print_r([], 1);
echo $a;',
],
'dontCoerceCallMapArgs' => [
'<?php
function getStr() : ?string {
return rand(0,1) ? "test" : null;
}
function test() : void {
$g = getStr();
/** @psalm-suppress PossiblyNullArgument */
$x = strtoupper($g);
$c = "prefix " . (strtoupper($g ?? "") === "x" ? "xa" : "ya");
echo "$x, $c\n";
}'
],
'mysqliRealConnectFunctionAllowsNullParameters' => [
'<?php
$mysqli = mysqli_init();
mysqli_real_connect($mysqli, null, \'test\', null);',
],
];
}
/**
* @return iterable<string,array{string,error_message:string,2?:string[],3?:bool,4?:string}>
*/
public function providerInvalidCodeParse()
{
return [
'arrayFilterWithoutTypes' => [
'<?php
$e = array_filter(
["a" => 5, "b" => 12, "c" => null],
function(?int $i) {
return $_GET["a"];
}
);',
'error_message' => 'MixedArgumentTypeCoercion',
'error_levels' => ['MissingClosureParamType', 'MissingClosureReturnType'],
],
'arrayFilterUseMethodOnInferrableInt' => [
'<?php
$a = array_filter([1, 2, 3, 4], function ($i) { return $i->foo(); });',
'error_message' => 'InvalidMethodCall',
],
'arrayMapUseMethodOnInferrableInt' => [
'<?php
$a = array_map(function ($i) { return $i->foo(); }, [1, 2, 3, 4]);',
'error_message' => 'InvalidMethodCall',
],
'invalidScalarArgument' => [
'<?php
function fooFoo(int $a): void {}
fooFoo("string");',
'error_message' => 'InvalidScalarArgument',
],
'invalidArgumentWithDeclareStrictTypes' => [
'<?php declare(strict_types=1);
function fooFoo(int $a): void {}
fooFoo("string");',
'error_message' => 'InvalidArgument',
],
'builtinFunctioninvalidArgumentWithWeakTypes' => [
'<?php
$s = substr(5, 4);',
'error_message' => 'InvalidScalarArgument',
],
'builtinFunctioninvalidArgumentWithDeclareStrictTypes' => [
'<?php declare(strict_types=1);
$s = substr(5, 4);',
'error_message' => 'InvalidArgument',
],
'builtinFunctioninvalidArgumentWithDeclareStrictTypesInClass' => [
'<?php declare(strict_types=1);
class A {
public function foo() : void {
$s = substr(5, 4);
}
}',
'error_message' => 'InvalidArgument',
],
'mixedArgument' => [
'<?php
function fooFoo(int $a): void {}
/** @var mixed */
$a = "hello";
fooFoo($a);',
'error_message' => 'MixedArgument',
'error_levels' => ['MixedAssignment'],
],
'nullArgument' => [
'<?php
function fooFoo(int $a): void {}
fooFoo(null);',
'error_message' => 'NullArgument',
],
'tooFewArguments' => [
'<?php
function fooFoo(int $a): void {}
fooFoo();',
'error_message' => 'TooFewArguments',
],
'tooManyArguments' => [
'<?php
function fooFoo(int $a): void {}
fooFoo(5, "dfd");',
'error_message' => 'TooManyArguments - src' . DIRECTORY_SEPARATOR . 'somefile.php:3:21 - Too many arguments for method fooFoo '
. '- expecting 1 but saw 2',
],
'tooManyArgumentsForConstructor' => [
'<?php
class A { }
new A("hello");',
'error_message' => 'TooManyArguments',
],
'typeCoercion' => [
'<?php
class A {}
class B extends A{}
function fooFoo(B $b): void {}
fooFoo(new A());',
'error_message' => 'ArgumentTypeCoercion',
],
'arrayTypeCoercion' => [
'<?php
class A {}
class B extends A{}
/**
* @param B[] $b
* @return void
*/
function fooFoo(array $b) {}
fooFoo([new A()]);',
'error_message' => 'ArgumentTypeCoercion',
],
'duplicateParam' => [
'<?php
/**
* @return void
*/
function f($p, $p) {}',
'error_message' => 'DuplicateParam',
'error_levels' => ['MissingParamType'],
],
'invalidParamDefault' => [
'<?php
function f(int $p = false) {}',
'error_message' => 'InvalidParamDefault',
],
'invalidDocblockParamDefault' => [
'<?php
/**
* @param int $p
* @return void
*/
function f($p = false) {}',
'error_message' => 'InvalidParamDefault',
],
'badByRef' => [
'<?php
function fooFoo(string &$v): void {}
fooFoo("a");',
'error_message' => 'InvalidPassByReference',
],
'badArrayByRef' => [
'<?php
function fooFoo(array &$a): void {}
fooFoo([1, 2, 3]);',
'error_message' => 'InvalidPassByReference',
],
'invalidArgAfterCallable' => [
'<?php
/**
* @param callable $callback
* @return void
*/
function route($callback) {
if (!is_callable($callback)) { }
takes_int("string");
}
function takes_int(int $i) {}',
'error_message' => 'InvalidScalarArgument',
'error_levels' => [
'MixedAssignment',
'MixedArrayAccess',
'RedundantConditionGivenDocblockType',
],
],
'undefinedFunctionInArrayMap' => [
'<?php
array_map(
"undefined_function",
[1, 2, 3]
);',
'error_message' => 'UndefinedFunction',
],
'arrayMapWithNonCallableStringArray' => [
'<?php
$foo = ["one", "two"];
array_map($foo, ["hello"]);',
'error_message' => 'InvalidArgument',
],
'arrayMapWithNonCallableIntArray' => [
'<?php
$foo = [1, 2];
array_map($foo, ["hello"]);',
'error_message' => 'InvalidArgument',
],
'objectLikeKeyChecksAgainstDifferentGeneric' => [
'<?php
/**
* @param array<string, int> $b
*/
function a($b): int
{
return $b["a"];
}
a(["a" => "hello"]);',
'error_message' => 'InvalidScalarArgument',
],
'objectLikeKeyChecksAgainstDifferentObjectLike' => [
'<?php
/**
* @param array{a: int} $b
*/
function a($b): int
{
return $b["a"];
}
a(["a" => "hello"]);',
'error_message' => 'InvalidArgument',
],
'possiblyNullFunctionCall' => [
'<?php
$a = rand(0, 1) ? function(): void {} : null;
$a();',
'error_message' => 'PossiblyNullFunctionCall',
],
'possiblyInvalidFunctionCall' => [
'<?php
$a = rand(0, 1) ? function(): void {} : 23515;
$a();',
'error_message' => 'PossiblyInvalidFunctionCall',
],
'arrayFilterBadArgs' => [
'<?php
function foo(int $i) : bool {
return true;
}
array_filter(["hello"], "foo");',
'error_message' => 'InvalidScalarArgument',
],
'arrayFilterTooFewArgs' => [
'<?php
function foo(int $i, string $s) : bool {
return true;
}
array_filter([1, 2, 3], "foo");',
'error_message' => 'TooFewArguments',
],
'arrayMapBadArgs' => [
'<?php
function foo(int $i) : bool {
return true;
}
array_map("foo", ["hello"]);',
'error_message' => 'InvalidScalarArgument',
],
'arrayMapTooFewArgs' => [
'<?php
function foo(int $i, string $s) : bool {
return true;
}
array_map("foo", [1, 2, 3]);',
'error_message' => 'TooFewArguments',
],
'arrayMapTooManyArgs' => [
'<?php
function foo() : bool {
return true;
}
array_map("foo", [1, 2, 3]);',
'error_message' => 'TooManyArguments',
],
'varExportAssignmentToVoid' => [
'<?php
$a = var_export(["a"]);',
'error_message' => 'AssignmentToVoid',
],
'explodeWithEmptyString' => [
'<?php
function exploder(string $s) : array {
return explode("", $s);
}',
'error_message' => 'FalsableReturnStatement',
],
'complainAboutArrayToIterable' => [
'<?php
class A {}
class B {}
/**
* @param iterable<mixed,A> $p
*/
function takesIterableOfA(iterable $p): void {}
takesIterableOfA([new B]); // should complain',
'error_message' => 'InvalidArgument',
],
'complainAboutArrayToIterableSingleParam' => [
'<?php
class A {}
class B {}
/**
* @param iterable<A> $p
*/
function takesIterableOfA(iterable $p): void {}
takesIterableOfA([new B]); // should complain',
'error_message' => 'InvalidArgument',
],
'putInvalidTypeMessagesFirst' => [
'<?php
$q = rand(0,1) ? new stdClass : false;
strlen($q);',
'error_message' => 'InvalidArgument',
],
'arrayReduceInvalidClosureTooFewArgs' => [
'<?php
$arr = [2, 3, 4, 5];
$direct_closure_result = array_reduce(
$arr,
function (int $carry) : int {
return 5;
},
1
);',
'error_message' => 'InvalidArgument',
'error_levels' => ['MixedTypeCoercion'],
],
'arrayReduceInvalidItemType' => [
'<?php
$arr = [2, 3, 4, 5];
$direct_closure_result = array_reduce(
$arr,
function (int $carry, stdClass $item) {
return $_GET["boo"];
},
1
);',
'error_message' => 'InvalidArgument',
'error_levels' => ['MissingClosureReturnType'],
],
'arrayReduceInvalidCarryType' => [
'<?php
$arr = [2, 3, 4, 5];
$direct_closure_result = array_reduce(
$arr,
function (stdClass $carry, int $item) {
return $_GET["boo"];
},
1
);',
'error_message' => 'InvalidArgument',
'error_levels' => ['MissingClosureReturnType'],
],
'arrayReduceInvalidCarryOutputType' => [
'<?php
$arr = [2, 3, 4, 5];
$direct_closure_result = array_reduce(
$arr,
function (int $carry, int $item) : stdClass {
return new stdClass;
},
1
);',
'error_message' => 'InvalidArgument',
],
'arrayPopNotNull' => [
'<?php
function expectsInt(int $a) : void {}
/**
* @param array<array-key, array{item:int}> $list
*/
function test(array $list) : void
{
while (!empty($list)) {
$tmp = array_pop($list);
if ($tmp === null) {}
}
}',
'error_message' => 'DocblockTypeContradiction',
],
'getTypeInvalidValue' => [
'<?php
/**
* @param mixed $maybe
*/
function matchesTypes($maybe) : void {
$t = gettype($maybe);
if ($t === "bool") {}
}',
'error_message' => 'TypeDoesNotContainType',
],
'rangeWithFloatStep' => [
'<?php
function foo(int $bar) : string {
return (string) $bar;
}
foreach (range(1, 10, .3) as $x) {
foo($x);
}',
'error_message' => 'InvalidScalarArgument',
],
'rangeWithFloatStart' => [
'<?php
function foo(int $bar) : string {
return (string) $bar;
}
foreach (range(1.4, 10) as $x) {
foo($x);
}',
'error_message' => 'InvalidScalarArgument',
],
'duplicateFunction' => [
'<?php
function f() : void {}
function f() : void {}',
'error_message' => 'DuplicateFunction',
],
'duplicateCoreFunction' => [
'<?php
function sort() : void {}',
'error_message' => 'DuplicateFunction',
],
'usortInvalidComparison' => [
'<?php
$arr = [["one"], ["two"], ["three"]];
usort(
$arr,
function (string $a, string $b): int {
return strcmp($a, $b);
}
);',
'error_message' => 'InvalidArgument',
],
'usortInvalidCallableString' => [
'<?php
$a = [[1], [2], [3]];
usort($a, "strcmp");',
'error_message' => 'InvalidArgument',
],
'functionCallOnMixed' => [
'<?php
/**
* @var mixed $s
* @psalm-suppress MixedAssignment
*/
$s = 1;
$s();',
'error_message' => 'MixedFunctionCall',
],
'iterableOfObjectCannotAcceptIterableOfInt' => [
'<?php
/** @param iterable<string,object> $_p */
function accepts(iterable $_p): void {}
/** @return iterable<int,int> */
function iterable() { yield 1; }
accepts(iterable());',
'error_message' => 'InvalidArgument',
],
'iterableOfObjectCannotAcceptTraversableOfInt' => [
'<?php
/** @param iterable<string,object> $_p */
function accepts(iterable $_p): void {}
/** @return Traversable<int,int> */
function traversable() { yield 1; }
accepts(traversable());',
'error_message' => 'InvalidArgument',
],
'iterableOfObjectCannotAcceptGeneratorOfInt' => [
'<?php
/** @param iterable<string,object> $_p */
function accepts(iterable $_p): void {}
/** @return Generator<int,int,mixed,void> */
function generator() { yield 1; }
accepts(generator());',
'error_message' => 'InvalidArgument',
],
'iterableOfObjectCannotAcceptArrayOfInt' => [
'<?php
/** @param iterable<string,object> $_p */
function accepts(iterable $_p): void {}
/** @return array<int,int> */
function arr() { return [1]; }
accepts(arr());',
'error_message' => 'InvalidArgument',
],
'nonNullableByRef' => [
'<?php
function foo(string &$s) : void {}
function bar() : void {
foo($bar);
}',
'error_message' => 'NullReference',
],
'intCastByRef' => [
'<?php
function foo(int &$i) : void {}
$a = rand(0, 1) ? null : 5;
/** @psalm-suppress MixedArgument */
foo((int) $a);',
'error_message' => 'InvalidPassByReference',
],
'implicitAssignmentToStringFromMixed' => [
'<?php
/** @param "a"|"b" $s */
function takesString(string $s) : void {}
function takesInt(int $i) : void {}
/**
* @param mixed $s
* @psalm-suppress MixedArgument
*/
function bar($s) : void {
takesString($s);
takesInt($s);
}',
'error_message' => 'InvalidScalarArgument',
],
'tooFewArgsAccurateCount' => [
'<?php
preg_match(\'/adsf/\');',
'error_message' => 'TooFewArguments - src' . DIRECTORY_SEPARATOR . 'somefile.php:2:21 - Too few arguments for method preg_match - expecting 2 but saw 1',
],
'compactUndefinedVariable' => [
'<?php
function foo() : array {
return compact("a", "b", "c");
}',
'error_message' => 'UndefinedVariable',
],
'countCallableArrayShouldBeTwo' => [
'<?php
/** @param callable|false $x */
function example($x) : void {
if (is_array($x)) {
$c = count($x);
if ($c !== 2) {}
}
}',
'error_message' => 'TypeDoesNotContainType',
],
'arrayShiftUndefinedVariable' => [
'<?php
/** @psalm-suppress MissingParamType */
function foo($data): void {
/** @psalm-suppress MixedArgument */
array_unshift($data, $a);
}',
'error_message' => 'UndefinedVariable',
],
'coerceCallMapArgsInStrictMode' => [
'<?php
declare(strict_types=1);
function getStr() : ?string {
return rand(0,1) ? "test" : null;
}
function test() : void {
$g = getStr();
/** @psalm-suppress PossiblyNullArgument */
$x = strtoupper($g);
$c = "prefix " . (strtoupper($g ?? "") === "x" ? "xa" : "ya");
echo "$x, $c\n";
}',
'error_message' => 'TypeDoesNotContainType',
],
'noCrashOnEmptyArrayPush' => [
'<?php
array_push();',
'error_message' => 'TooFewArguments',
],
];
}
}
| 1 | 8,019 | Is this a good place for these test cases? | vimeo-psalm | php |
@@ -45,6 +45,7 @@ class Job(object):
self.vault = vault
if response_data:
for response_name, attr_name, default in self.ResponseDataElements:
+ print response_name, attr_name
setattr(self, attr_name, response_data[response_name])
else:
for response_name, attr_name, default in self.ResponseDataElements: | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import urllib
import json
class Job(object):
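    # Each tuple maps a Glacier response key to the Python attribute it is
    # stored under, plus the default used when no response data is supplied.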
ResponseDataElements = (('Action', 'action', None),
('ArchiveId', 'archive_id', None),
('ArchiveSizeInBytes', 'archive_size', 0),
('Completed', 'completed', False),
('CompletionDate', 'completion_date', None),
('CreationDate', 'creation_date', None),
('InventorySizeInBytes', 'inventory_size', 0),
('JobDescription', 'description', None),
('JobId', 'id', None),
('SHA256TreeHash', 'sha256_treehash', None),
('SNSTopic', 'sns_topic', None),
('StatusCode', 'status_code', None),
('StatusMessage', 'status_message', None),
('VaultARN', 'arn', None))
def __init__(self, vault, response_data=None):
self.vault = vault
if response_data:
for response_name, attr_name, default in self.ResponseDataElements:
setattr(self, attr_name, response_data[response_name])
else:
for response_name, attr_name, default in self.ResponseDataElements:
setattr(self, attr_name, default)
def __repr__(self):
return 'Job(%s)' % self.arn
def get_output(self, byte_range=None):
"""
This operation downloads the output of the job. Depending on
the job type you specified when you initiated the job, the
output will be either the content of an archive or a vault
inventory.
You can download all the job output or download a portion of
the output by specifying a byte range. In the case of an
archive retrieval job, depending on the byte range you
specify, Amazon Glacier returns the checksum for the portion
of the data. You can compute the checksum on the client and
verify that the values match to ensure the portion you
downloaded is the correct data.
:type byte_range: tuple
        :param byte_range: A tuple of integers specifying the slice (in bytes)
            of the archive you want to receive
"""
return self.vault.layer1.get_job_output(self.vault.name,
self.id,
byte_range)
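# Illustrative usage sketch, not part of the original module; the vault handle
# and job id are hypothetical and assume a boto glacier Layer2-style vault:
#
#   job = vault.get_job('JOB-ID')
#   if job.completed:
#       first_mb = job.get_output(byte_range=(0, 1024 * 1024 - 1))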
| 1 | 8,497 | Did you mean to leave the print statement? | boto-boto | py |
@@ -184,6 +184,9 @@ Interpreter.false = new Buffer([]);
Interpreter.MAX_SCRIPT_ELEMENT_SIZE = 520;
+Interpreter.LOCKTIME_THRESHOLD = 500000000;
+Interpreter.LOCKTIME_THRESHOLD_BN = new BN(500000000);
+
// flags taken from bitcoind
// bitcoind commit: b5d1b1092998bc95313856d535c632ea5a8f9104
Interpreter.SCRIPT_VERIFY_NONE = 0; | 1 | 'use strict';
var _ = require('lodash');
var Script = require('./script');
var Opcode = require('../opcode');
var BN = require('../crypto/bn');
var Hash = require('../crypto/hash');
var Signature = require('../crypto/signature');
var PublicKey = require('../publickey');
/**
* Bitcoin transactions contain scripts. Each input has a script called the
* scriptSig, and each output has a script called the scriptPubkey. To validate
* an input, the input's script is concatenated with the referenced output script,
* and the result is executed. If at the end of execution the stack contains a
* "true" value, then the transaction is valid.
*
* The primary way to use this class is via the verify function.
* e.g., Interpreter().verify( ... );
*/
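// Illustrative usage, not part of the original source; the scriptSig,
// scriptPubkey, tx and input index are assumed to come from the caller:
//
//   var interpreter = new Interpreter();
//   var valid = interpreter.verify(scriptSig, scriptPubkey, tx, 0,
//     Interpreter.SCRIPT_VERIFY_P2SH);
//   if (!valid) { console.log(interpreter.errstr); }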
var Interpreter = function Interpreter(obj) {
if (!(this instanceof Interpreter)) {
return new Interpreter(obj);
}
if (obj) {
this.initialize();
this.set(obj);
} else {
this.initialize();
}
};
/**
* Verifies a Script by executing it and returns true if it is valid.
* This function needs to be provided with the scriptSig and the scriptPubkey
* separately.
* @param {Script} scriptSig - the script's first part (corresponding to the tx input)
* @param {Script} scriptPubkey - the script's last part (corresponding to the tx output)
* @param {Transaction=} tx - the Transaction containing the scriptSig in one input (used
* to check signature validity for some opcodes like OP_CHECKSIG)
* @param {number} nin - index of the transaction input containing the scriptSig verified.
* @param {number} flags - evaluation flags. See Interpreter.SCRIPT_* constants
*
* Translated from bitcoind's VerifyScript
*/
Interpreter.prototype.verify = function(scriptSig, scriptPubkey, tx, nin, flags) {
var Transaction = require('../transaction');
if (_.isUndefined(tx)) {
tx = new Transaction();
}
if (_.isUndefined(nin)) {
nin = 0;
}
if (_.isUndefined(flags)) {
flags = 0;
}
this.set({
script: scriptSig,
tx: tx,
nin: nin,
flags: flags
});
var stackCopy;
if ((flags & Interpreter.SCRIPT_VERIFY_SIGPUSHONLY) !== 0 && !scriptSig.isPushOnly()) {
this.errstr = 'SCRIPT_ERR_SIG_PUSHONLY';
return false;
}
// evaluate scriptSig
if (!this.evaluate()) {
return false;
}
if (flags & Interpreter.SCRIPT_VERIFY_P2SH) {
stackCopy = this.stack.slice();
}
var stack = this.stack;
this.initialize();
this.set({
script: scriptPubkey,
stack: stack,
tx: tx,
nin: nin,
flags: flags
});
// evaluate scriptPubkey
if (!this.evaluate()) {
return false;
}
if (this.stack.length === 0) {
this.errstr = 'SCRIPT_ERR_EVAL_FALSE_NO_RESULT';
return false;
}
var buf = this.stack[this.stack.length - 1];
if (!Interpreter.castToBool(buf)) {
this.errstr = 'SCRIPT_ERR_EVAL_FALSE_IN_STACK';
return false;
}
// Additional validation for spend-to-script-hash transactions:
if ((flags & Interpreter.SCRIPT_VERIFY_P2SH) && scriptPubkey.isScriptHashOut()) {
// scriptSig must be literals-only or validation fails
if (!scriptSig.isPushOnly()) {
this.errstr = 'SCRIPT_ERR_SIG_PUSHONLY';
return false;
}
// stackCopy cannot be empty here, because if it was the
// P2SH HASH <> EQUAL scriptPubKey would be evaluated with
// an empty stack and the EvalScript above would return false.
if (stackCopy.length === 0) {
throw new Error('internal error - stack copy empty');
}
var redeemScriptSerialized = stackCopy[stackCopy.length - 1];
var redeemScript = Script.fromBuffer(redeemScriptSerialized);
stackCopy.pop();
this.initialize();
this.set({
script: redeemScript,
stack: stackCopy,
tx: tx,
nin: nin,
flags: flags
});
// evaluate redeemScript
if (!this.evaluate()) {
return false;
}
if (stackCopy.length === 0) {
this.errstr = 'SCRIPT_ERR_EVAL_FALSE_NO_P2SH_STACK';
return false;
}
if (!Interpreter.castToBool(stackCopy[stackCopy.length - 1])) {
this.errstr = 'SCRIPT_ERR_EVAL_FALSE_IN_P2SH_STACK';
return false;
} else {
return true;
}
}
return true;
};
module.exports = Interpreter;
Interpreter.prototype.initialize = function(obj) {
this.stack = [];
this.altstack = [];
this.pc = 0;
this.pbegincodehash = 0;
this.nOpCount = 0;
this.vfExec = [];
this.errstr = '';
this.flags = 0;
};
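/**
 * Overwrites only the evaluation-state properties present on `obj`; any
 * property that is not supplied keeps its current value.
 */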
Interpreter.prototype.set = function(obj) {
this.script = obj.script || this.script;
this.tx = obj.tx || this.tx;
this.nin = typeof obj.nin !== 'undefined' ? obj.nin : this.nin;
this.stack = obj.stack || this.stack;
  this.altstack = obj.altstack || this.altstack;
this.pc = typeof obj.pc !== 'undefined' ? obj.pc : this.pc;
this.pbegincodehash = typeof obj.pbegincodehash !== 'undefined' ? obj.pbegincodehash : this.pbegincodehash;
this.nOpCount = typeof obj.nOpCount !== 'undefined' ? obj.nOpCount : this.nOpCount;
this.vfExec = obj.vfExec || this.vfExec;
this.errstr = obj.errstr || this.errstr;
this.flags = typeof obj.flags !== 'undefined' ? obj.flags : this.flags;
};
Interpreter.true = new Buffer([1]);
Interpreter.false = new Buffer([]);
Interpreter.MAX_SCRIPT_ELEMENT_SIZE = 520;
// flags taken from bitcoind
// bitcoind commit: b5d1b1092998bc95313856d535c632ea5a8f9104
Interpreter.SCRIPT_VERIFY_NONE = 0;
// Evaluate P2SH subscripts (softfork safe, BIP16).
Interpreter.SCRIPT_VERIFY_P2SH = (1 << 0);
// Passing a non-strict-DER signature or one with undefined hashtype to a checksig operation causes script failure.
// Passing a pubkey that is not (0x04 + 64 bytes) or (0x02 or 0x03 + 32 bytes) to checksig causes that pubkey to be
// skipped (not softfork safe: this flag can widen the validity of OP_CHECKSIG OP_NOT).
Interpreter.SCRIPT_VERIFY_STRICTENC = (1 << 1);
// Passing a non-strict-DER signature to a checksig operation causes script failure (softfork safe, BIP62 rule 1)
Interpreter.SCRIPT_VERIFY_DERSIG = (1 << 2);
// Passing a non-strict-DER signature or one with S > order/2 to a checksig operation causes script failure
// (softfork safe, BIP62 rule 5).
Interpreter.SCRIPT_VERIFY_LOW_S = (1 << 3);
// verify dummy stack item consumed by CHECKMULTISIG is of zero-length (softfork safe, BIP62 rule 7).
Interpreter.SCRIPT_VERIFY_NULLDUMMY = (1 << 4);
// Using a non-push operator in the scriptSig causes script failure (softfork safe, BIP62 rule 2).
Interpreter.SCRIPT_VERIFY_SIGPUSHONLY = (1 << 5);
// Require minimal encodings for all push operations (OP_0... OP_16, OP_1NEGATE where possible, direct
// pushes up to 75 bytes, OP_PUSHDATA up to 255 bytes, OP_PUSHDATA2 for anything larger). Evaluating
// any other push causes the script to fail (BIP62 rule 3).
// In addition, whenever a stack element is interpreted as a number, it must be of minimal length (BIP62 rule 4).
// (softfork safe)
Interpreter.SCRIPT_VERIFY_MINIMALDATA = (1 << 6);
// Discourage use of NOPs reserved for upgrades (NOP1-10)
//
// Provided so that nodes can avoid accepting or mining transactions
// containing executed NOPs whose meaning may change after a soft-fork,
// thus rendering the script invalid; with this flag set executing
// discouraged NOPs fails the script. This verification flag will never be
// a mandatory flag applied to scripts in a block. NOPs that are not
// executed, e.g. within an unexecuted IF ENDIF block, are *not* rejected.
Interpreter.SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS = (1 << 7);
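// The verification flags above are bit masks, so callers combine them with
// bitwise OR (illustrative, not from the original source):
//
//   var flags = Interpreter.SCRIPT_VERIFY_P2SH | Interpreter.SCRIPT_VERIFY_DERSIG;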
Interpreter.castToBool = function(buf) {
for (var i = 0; i < buf.length; i++) {
if (buf[i] !== 0) {
// can be negative zero
if (i === buf.length - 1 && buf[i] === 0x80) {
return false;
}
return true;
}
}
return false;
};
/**
* Translated from bitcoind's CheckSignatureEncoding
*/
Interpreter.prototype.checkSignatureEncoding = function(buf) {
var sig;
if ((this.flags & (Interpreter.SCRIPT_VERIFY_DERSIG | Interpreter.SCRIPT_VERIFY_LOW_S | Interpreter.SCRIPT_VERIFY_STRICTENC)) !== 0 && !Signature.isTxDER(buf)) {
this.errstr = 'SCRIPT_ERR_SIG_DER_INVALID_FORMAT';
return false;
} else if ((this.flags & Interpreter.SCRIPT_VERIFY_LOW_S) !== 0) {
sig = Signature.fromTxFormat(buf);
if (!sig.hasLowS()) {
this.errstr = 'SCRIPT_ERR_SIG_DER_HIGH_S';
return false;
}
} else if ((this.flags & Interpreter.SCRIPT_VERIFY_STRICTENC) !== 0) {
sig = Signature.fromTxFormat(buf);
if (!sig.hasDefinedHashtype()) {
this.errstr = 'SCRIPT_ERR_SIG_HASHTYPE';
return false;
}
}
return true;
};
/**
* Translated from bitcoind's CheckPubKeyEncoding
*/
Interpreter.prototype.checkPubkeyEncoding = function(buf) {
if ((this.flags & Interpreter.SCRIPT_VERIFY_STRICTENC) !== 0 && !PublicKey.isValid(buf)) {
this.errstr = 'SCRIPT_ERR_PUBKEYTYPE';
return false;
}
return true;
};
/**
* Based on bitcoind's EvalScript function, with the inner loop moved to
* Interpreter.prototype.step()
* bitcoind commit: b5d1b1092998bc95313856d535c632ea5a8f9104
*/
Interpreter.prototype.evaluate = function() {
if (this.script.toBuffer().length > 10000) {
this.errstr = 'SCRIPT_ERR_SCRIPT_SIZE';
return false;
}
try {
while (this.pc < this.script.chunks.length) {
var fSuccess = this.step();
if (!fSuccess) {
return false;
}
}
// Size limits
if (this.stack.length + this.altstack.length > 1000) {
this.errstr = 'SCRIPT_ERR_STACK_SIZE';
return false;
}
} catch (e) {
this.errstr = 'SCRIPT_ERR_UNKNOWN_ERROR: ' + e;
return false;
}
if (this.vfExec.length > 0) {
this.errstr = 'SCRIPT_ERR_UNBALANCED_CONDITIONAL';
return false;
}
return true;
};
/**
* Based on the inner loop of bitcoind's EvalScript function
* bitcoind commit: b5d1b1092998bc95313856d535c632ea5a8f9104
*/
Interpreter.prototype.step = function() {
var fRequireMinimal = (this.flags & Interpreter.SCRIPT_VERIFY_MINIMALDATA) !== 0;
//bool fExec = !count(vfExec.begin(), vfExec.end(), false);
var fExec = (this.vfExec.indexOf(false) === -1);
var buf, buf1, buf2, spliced, n, x1, x2, bn, bn1, bn2, bufSig, bufPubkey, subscript;
var sig, pubkey;
var fValue, fSuccess;
// Read instruction
var chunk = this.script.chunks[this.pc];
this.pc++;
var opcodenum = chunk.opcodenum;
if (_.isUndefined(opcodenum)) {
this.errstr = 'SCRIPT_ERR_UNDEFINED_OPCODE';
return false;
}
if (chunk.buf && chunk.buf.length > Interpreter.MAX_SCRIPT_ELEMENT_SIZE) {
this.errstr = 'SCRIPT_ERR_PUSH_SIZE';
return false;
}
// Note how Opcode.OP_RESERVED does not count towards the opcode limit.
if (opcodenum > Opcode.OP_16 && ++(this.nOpCount) > 201) {
this.errstr = 'SCRIPT_ERR_OP_COUNT';
return false;
}
if (opcodenum === Opcode.OP_CAT ||
opcodenum === Opcode.OP_SUBSTR ||
opcodenum === Opcode.OP_LEFT ||
opcodenum === Opcode.OP_RIGHT ||
opcodenum === Opcode.OP_INVERT ||
opcodenum === Opcode.OP_AND ||
opcodenum === Opcode.OP_OR ||
opcodenum === Opcode.OP_XOR ||
opcodenum === Opcode.OP_2MUL ||
opcodenum === Opcode.OP_2DIV ||
opcodenum === Opcode.OP_MUL ||
opcodenum === Opcode.OP_DIV ||
opcodenum === Opcode.OP_MOD ||
opcodenum === Opcode.OP_LSHIFT ||
opcodenum === Opcode.OP_RSHIFT) {
this.errstr = 'SCRIPT_ERR_DISABLED_OPCODE';
return false;
}
if (fExec && 0 <= opcodenum && opcodenum <= Opcode.OP_PUSHDATA4) {
if (fRequireMinimal && !this.script.checkMinimalPush(this.pc - 1)) {
this.errstr = 'SCRIPT_ERR_MINIMALDATA';
return false;
}
if (!chunk.buf) {
this.stack.push(Interpreter.false);
} else if (chunk.len !== chunk.buf.length) {
throw new Error('Length of push value not equal to length of data');
} else {
this.stack.push(chunk.buf);
}
} else if (fExec || (Opcode.OP_IF <= opcodenum && opcodenum <= Opcode.OP_ENDIF)) {
switch (opcodenum) {
// Push value
case Opcode.OP_1NEGATE:
case Opcode.OP_1:
case Opcode.OP_2:
case Opcode.OP_3:
case Opcode.OP_4:
case Opcode.OP_5:
case Opcode.OP_6:
case Opcode.OP_7:
case Opcode.OP_8:
case Opcode.OP_9:
case Opcode.OP_10:
case Opcode.OP_11:
case Opcode.OP_12:
case Opcode.OP_13:
case Opcode.OP_14:
case Opcode.OP_15:
case Opcode.OP_16:
{
// ( -- value)
// ScriptNum bn((int)opcode - (int)(Opcode.OP_1 - 1));
n = opcodenum - (Opcode.OP_1 - 1);
buf = new BN(n).toScriptNumBuffer();
this.stack.push(buf);
// The result of these opcodes should always be the minimal way to push the data
// they push, so no need for a CheckMinimalPush here.
}
break;
//
// Control
//
case Opcode.OP_NOP:
break;
case Opcode.OP_NOP1:
case Opcode.OP_NOP2:
case Opcode.OP_NOP3:
case Opcode.OP_NOP4:
case Opcode.OP_NOP5:
case Opcode.OP_NOP6:
case Opcode.OP_NOP7:
case Opcode.OP_NOP8:
case Opcode.OP_NOP9:
case Opcode.OP_NOP10:
{
if (this.flags & Interpreter.SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS) {
this.errstr = 'SCRIPT_ERR_DISCOURAGE_UPGRADABLE_NOPS';
return false;
}
}
break;
case Opcode.OP_IF:
case Opcode.OP_NOTIF:
{
// <expression> if [statements] [else [statements]] endif
// bool fValue = false;
fValue = false;
if (fExec) {
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_UNBALANCED_CONDITIONAL';
return false;
}
buf = this.stack.pop();
fValue = Interpreter.castToBool(buf);
if (opcodenum === Opcode.OP_NOTIF) {
fValue = !fValue;
}
}
this.vfExec.push(fValue);
}
break;
case Opcode.OP_ELSE:
{
if (this.vfExec.length === 0) {
this.errstr = 'SCRIPT_ERR_UNBALANCED_CONDITIONAL';
return false;
}
this.vfExec[this.vfExec.length - 1] = !this.vfExec[this.vfExec.length - 1];
}
break;
case Opcode.OP_ENDIF:
{
if (this.vfExec.length === 0) {
this.errstr = 'SCRIPT_ERR_UNBALANCED_CONDITIONAL';
return false;
}
this.vfExec.pop();
}
break;
case Opcode.OP_VERIFY:
{
// (true -- ) or
// (false -- false) and return
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf = this.stack[this.stack.length - 1];
fValue = Interpreter.castToBool(buf);
if (fValue) {
this.stack.pop();
} else {
this.errstr = 'SCRIPT_ERR_VERIFY';
return false;
}
}
break;
case Opcode.OP_RETURN:
{
this.errstr = 'SCRIPT_ERR_OP_RETURN';
return false;
}
break;
//
// Stack ops
//
case Opcode.OP_TOALTSTACK:
{
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
this.altstack.push(this.stack.pop());
}
break;
case Opcode.OP_FROMALTSTACK:
{
if (this.altstack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_ALTSTACK_OPERATION';
return false;
}
this.stack.push(this.altstack.pop());
}
break;
case Opcode.OP_2DROP:
{
// (x1 x2 -- )
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
this.stack.pop();
this.stack.pop();
}
break;
case Opcode.OP_2DUP:
{
// (x1 x2 -- x1 x2 x1 x2)
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf1 = this.stack[this.stack.length - 2];
buf2 = this.stack[this.stack.length - 1];
this.stack.push(buf1);
this.stack.push(buf2);
}
break;
case Opcode.OP_3DUP:
{
// (x1 x2 x3 -- x1 x2 x3 x1 x2 x3)
if (this.stack.length < 3) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf1 = this.stack[this.stack.length - 3];
buf2 = this.stack[this.stack.length - 2];
var buf3 = this.stack[this.stack.length - 1];
this.stack.push(buf1);
this.stack.push(buf2);
this.stack.push(buf3);
}
break;
case Opcode.OP_2OVER:
{
// (x1 x2 x3 x4 -- x1 x2 x3 x4 x1 x2)
if (this.stack.length < 4) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf1 = this.stack[this.stack.length - 4];
buf2 = this.stack[this.stack.length - 3];
this.stack.push(buf1);
this.stack.push(buf2);
}
break;
case Opcode.OP_2ROT:
{
// (x1 x2 x3 x4 x5 x6 -- x3 x4 x5 x6 x1 x2)
if (this.stack.length < 6) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
spliced = this.stack.splice(this.stack.length - 6, 2);
this.stack.push(spliced[0]);
this.stack.push(spliced[1]);
}
break;
case Opcode.OP_2SWAP:
{
// (x1 x2 x3 x4 -- x3 x4 x1 x2)
if (this.stack.length < 4) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
spliced = this.stack.splice(this.stack.length - 4, 2);
this.stack.push(spliced[0]);
this.stack.push(spliced[1]);
}
break;
case Opcode.OP_IFDUP:
{
// (x - 0 | x x)
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf = this.stack[this.stack.length - 1];
fValue = Interpreter.castToBool(buf);
if (fValue) {
this.stack.push(buf);
}
}
break;
case Opcode.OP_DEPTH:
{
// -- stacksize
buf = new BN(this.stack.length).toScriptNumBuffer();
this.stack.push(buf);
}
break;
case Opcode.OP_DROP:
{
// (x -- )
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
this.stack.pop();
}
break;
case Opcode.OP_DUP:
{
// (x -- x x)
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
this.stack.push(this.stack[this.stack.length - 1]);
}
break;
case Opcode.OP_NIP:
{
// (x1 x2 -- x2)
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
this.stack.splice(this.stack.length - 2, 1);
}
break;
case Opcode.OP_OVER:
{
// (x1 x2 -- x1 x2 x1)
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
this.stack.push(this.stack[this.stack.length - 2]);
}
break;
case Opcode.OP_PICK:
case Opcode.OP_ROLL:
{
// (xn ... x2 x1 x0 n - xn ... x2 x1 x0 xn)
// (xn ... x2 x1 x0 n - ... x2 x1 x0 xn)
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf = this.stack[this.stack.length - 1];
bn = BN.fromScriptNumBuffer(buf, fRequireMinimal);
n = bn.toNumber();
this.stack.pop();
if (n < 0 || n >= this.stack.length) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf = this.stack[this.stack.length - n - 1];
if (opcodenum === Opcode.OP_ROLL) {
this.stack.splice(this.stack.length - n - 1, 1);
}
this.stack.push(buf);
}
break;
case Opcode.OP_ROT:
{
// (x1 x2 x3 -- x2 x3 x1)
// x2 x1 x3 after first swap
// x2 x3 x1 after second swap
if (this.stack.length < 3) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
x1 = this.stack[this.stack.length - 3];
x2 = this.stack[this.stack.length - 2];
var x3 = this.stack[this.stack.length - 1];
this.stack[this.stack.length - 3] = x2;
this.stack[this.stack.length - 2] = x3;
this.stack[this.stack.length - 1] = x1;
}
break;
case Opcode.OP_SWAP:
{
// (x1 x2 -- x2 x1)
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
x1 = this.stack[this.stack.length - 2];
x2 = this.stack[this.stack.length - 1];
this.stack[this.stack.length - 2] = x2;
this.stack[this.stack.length - 1] = x1;
}
break;
case Opcode.OP_TUCK:
{
// (x1 x2 -- x2 x1 x2)
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
this.stack.splice(this.stack.length - 2, 0, this.stack[this.stack.length - 1]);
}
break;
case Opcode.OP_SIZE:
{
// (in -- in size)
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
bn = new BN(this.stack[this.stack.length - 1].length);
this.stack.push(bn.toScriptNumBuffer());
}
break;
//
// Bitwise logic
//
case Opcode.OP_EQUAL:
case Opcode.OP_EQUALVERIFY:
//case Opcode.OP_NOTEQUAL: // use Opcode.OP_NUMNOTEQUAL
{
// (x1 x2 - bool)
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf1 = this.stack[this.stack.length - 2];
buf2 = this.stack[this.stack.length - 1];
var fEqual = buf1.toString('hex') === buf2.toString('hex');
this.stack.pop();
this.stack.pop();
this.stack.push(fEqual ? Interpreter.true : Interpreter.false);
if (opcodenum === Opcode.OP_EQUALVERIFY) {
if (fEqual) {
this.stack.pop();
} else {
this.errstr = 'SCRIPT_ERR_EQUALVERIFY';
return false;
}
}
}
break;
//
// Numeric
//
case Opcode.OP_1ADD:
case Opcode.OP_1SUB:
case Opcode.OP_NEGATE:
case Opcode.OP_ABS:
case Opcode.OP_NOT:
case Opcode.OP_0NOTEQUAL:
{
// (in -- out)
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf = this.stack[this.stack.length - 1];
bn = BN.fromScriptNumBuffer(buf, fRequireMinimal);
switch (opcodenum) {
case Opcode.OP_1ADD:
bn = bn.add(BN.One);
break;
case Opcode.OP_1SUB:
bn = bn.sub(BN.One);
break;
case Opcode.OP_NEGATE:
bn = bn.neg();
break;
case Opcode.OP_ABS:
if (bn.cmp(BN.Zero) < 0) {
bn = bn.neg();
}
break;
case Opcode.OP_NOT:
bn = new BN((bn.cmp(BN.Zero) === 0) + 0);
break;
case Opcode.OP_0NOTEQUAL:
bn = new BN((bn.cmp(BN.Zero) !== 0) + 0);
break;
//default: assert(!'invalid opcode'); break; // TODO: does this ever occur?
}
this.stack.pop();
this.stack.push(bn.toScriptNumBuffer());
}
break;
case Opcode.OP_ADD:
case Opcode.OP_SUB:
case Opcode.OP_BOOLAND:
case Opcode.OP_BOOLOR:
case Opcode.OP_NUMEQUAL:
case Opcode.OP_NUMEQUALVERIFY:
case Opcode.OP_NUMNOTEQUAL:
case Opcode.OP_LESSTHAN:
case Opcode.OP_GREATERTHAN:
case Opcode.OP_LESSTHANOREQUAL:
case Opcode.OP_GREATERTHANOREQUAL:
case Opcode.OP_MIN:
case Opcode.OP_MAX:
{
// (x1 x2 -- out)
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
bn1 = BN.fromScriptNumBuffer(this.stack[this.stack.length - 2], fRequireMinimal);
bn2 = BN.fromScriptNumBuffer(this.stack[this.stack.length - 1], fRequireMinimal);
bn = new BN(0);
switch (opcodenum) {
case Opcode.OP_ADD:
bn = bn1.add(bn2);
break;
case Opcode.OP_SUB:
bn = bn1.sub(bn2);
break;
// case Opcode.OP_BOOLAND: bn = (bn1 != bnZero && bn2 != bnZero); break;
case Opcode.OP_BOOLAND:
bn = new BN(((bn1.cmp(BN.Zero) !== 0) && (bn2.cmp(BN.Zero) !== 0)) + 0);
break;
// case Opcode.OP_BOOLOR: bn = (bn1 != bnZero || bn2 != bnZero); break;
case Opcode.OP_BOOLOR:
bn = new BN(((bn1.cmp(BN.Zero) !== 0) || (bn2.cmp(BN.Zero) !== 0)) + 0);
break;
// case Opcode.OP_NUMEQUAL: bn = (bn1 == bn2); break;
case Opcode.OP_NUMEQUAL:
bn = new BN((bn1.cmp(bn2) === 0) + 0);
break;
// case Opcode.OP_NUMEQUALVERIFY: bn = (bn1 == bn2); break;
case Opcode.OP_NUMEQUALVERIFY:
bn = new BN((bn1.cmp(bn2) === 0) + 0);
break;
// case Opcode.OP_NUMNOTEQUAL: bn = (bn1 != bn2); break;
case Opcode.OP_NUMNOTEQUAL:
bn = new BN((bn1.cmp(bn2) !== 0) + 0);
break;
// case Opcode.OP_LESSTHAN: bn = (bn1 < bn2); break;
case Opcode.OP_LESSTHAN:
bn = new BN((bn1.cmp(bn2) < 0) + 0);
break;
// case Opcode.OP_GREATERTHAN: bn = (bn1 > bn2); break;
case Opcode.OP_GREATERTHAN:
bn = new BN((bn1.cmp(bn2) > 0) + 0);
break;
// case Opcode.OP_LESSTHANOREQUAL: bn = (bn1 <= bn2); break;
case Opcode.OP_LESSTHANOREQUAL:
bn = new BN((bn1.cmp(bn2) <= 0) + 0);
break;
// case Opcode.OP_GREATERTHANOREQUAL: bn = (bn1 >= bn2); break;
case Opcode.OP_GREATERTHANOREQUAL:
bn = new BN((bn1.cmp(bn2) >= 0) + 0);
break;
case Opcode.OP_MIN:
bn = (bn1.cmp(bn2) < 0 ? bn1 : bn2);
break;
case Opcode.OP_MAX:
bn = (bn1.cmp(bn2) > 0 ? bn1 : bn2);
break;
// default: assert(!'invalid opcode'); break; //TODO: does this ever occur?
}
this.stack.pop();
this.stack.pop();
this.stack.push(bn.toScriptNumBuffer());
if (opcodenum === Opcode.OP_NUMEQUALVERIFY) {
// if (CastToBool(stacktop(-1)))
if (Interpreter.castToBool(this.stack[this.stack.length - 1])) {
this.stack.pop();
} else {
this.errstr = 'SCRIPT_ERR_NUMEQUALVERIFY';
return false;
}
}
}
break;
case Opcode.OP_WITHIN:
{
// (x min max -- out)
if (this.stack.length < 3) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
bn1 = BN.fromScriptNumBuffer(this.stack[this.stack.length - 3], fRequireMinimal);
bn2 = BN.fromScriptNumBuffer(this.stack[this.stack.length - 2], fRequireMinimal);
var bn3 = BN.fromScriptNumBuffer(this.stack[this.stack.length - 1], fRequireMinimal);
//bool fValue = (bn2 <= bn1 && bn1 < bn3);
fValue = (bn2.cmp(bn1) <= 0) && (bn1.cmp(bn3) < 0);
this.stack.pop();
this.stack.pop();
this.stack.pop();
this.stack.push(fValue ? Interpreter.true : Interpreter.false);
}
break;
//
// Crypto
//
case Opcode.OP_RIPEMD160:
case Opcode.OP_SHA1:
case Opcode.OP_SHA256:
case Opcode.OP_HASH160:
case Opcode.OP_HASH256:
{
// (in -- hash)
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
buf = this.stack[this.stack.length - 1];
//valtype vchHash((opcode == Opcode.OP_RIPEMD160 ||
// opcode == Opcode.OP_SHA1 || opcode == Opcode.OP_HASH160) ? 20 : 32);
var bufHash;
if (opcodenum === Opcode.OP_RIPEMD160) {
bufHash = Hash.ripemd160(buf);
} else if (opcodenum === Opcode.OP_SHA1) {
bufHash = Hash.sha1(buf);
} else if (opcodenum === Opcode.OP_SHA256) {
bufHash = Hash.sha256(buf);
} else if (opcodenum === Opcode.OP_HASH160) {
bufHash = Hash.sha256ripemd160(buf);
} else if (opcodenum === Opcode.OP_HASH256) {
bufHash = Hash.sha256sha256(buf);
}
this.stack.pop();
this.stack.push(bufHash);
}
break;
case Opcode.OP_CODESEPARATOR:
{
// Hash starts after the code separator
this.pbegincodehash = this.pc;
}
break;
case Opcode.OP_CHECKSIG:
case Opcode.OP_CHECKSIGVERIFY:
{
// (sig pubkey -- bool)
if (this.stack.length < 2) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
bufSig = this.stack[this.stack.length - 2];
bufPubkey = this.stack[this.stack.length - 1];
// Subset of script starting at the most recent codeseparator
// CScript scriptCode(pbegincodehash, pend);
subscript = new Script().set({
chunks: this.script.chunks.slice(this.pbegincodehash)
});
// Drop the signature, since there's no way for a signature to sign itself
var tmpScript = new Script().add(bufSig);
subscript.findAndDelete(tmpScript);
if (!this.checkSignatureEncoding(bufSig) || !this.checkPubkeyEncoding(bufPubkey)) {
return false;
}
try {
sig = Signature.fromTxFormat(bufSig);
pubkey = PublicKey.fromBuffer(bufPubkey, false);
fSuccess = this.tx.verifySignature(sig, pubkey, this.nin, subscript);
} catch (e) {
//invalid sig or pubkey
fSuccess = false;
}
this.stack.pop();
this.stack.pop();
// stack.push_back(fSuccess ? vchTrue : vchFalse);
this.stack.push(fSuccess ? Interpreter.true : Interpreter.false);
if (opcodenum === Opcode.OP_CHECKSIGVERIFY) {
if (fSuccess) {
this.stack.pop();
} else {
this.errstr = 'SCRIPT_ERR_CHECKSIGVERIFY';
return false;
}
}
}
break;
case Opcode.OP_CHECKMULTISIG:
case Opcode.OP_CHECKMULTISIGVERIFY:
{
// ([sig ...] num_of_signatures [pubkey ...] num_of_pubkeys -- bool)
var i = 1;
if (this.stack.length < i) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
var nKeysCount = BN.fromScriptNumBuffer(this.stack[this.stack.length - i], fRequireMinimal).toNumber();
if (nKeysCount < 0 || nKeysCount > 20) {
this.errstr = 'SCRIPT_ERR_PUBKEY_COUNT';
return false;
}
this.nOpCount += nKeysCount;
if (this.nOpCount > 201) {
this.errstr = 'SCRIPT_ERR_OP_COUNT';
return false;
}
// int ikey = ++i;
var ikey = ++i;
i += nKeysCount;
if (this.stack.length < i) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
var nSigsCount = BN.fromScriptNumBuffer(this.stack[this.stack.length - i], fRequireMinimal).toNumber();
if (nSigsCount < 0 || nSigsCount > nKeysCount) {
this.errstr = 'SCRIPT_ERR_SIG_COUNT';
return false;
}
// int isig = ++i;
var isig = ++i;
i += nSigsCount;
if (this.stack.length < i) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
// Subset of script starting at the most recent codeseparator
subscript = new Script().set({
chunks: this.script.chunks.slice(this.pbegincodehash)
});
// Drop the signatures, since there's no way for a signature to sign itself
for (var k = 0; k < nSigsCount; k++) {
bufSig = this.stack[this.stack.length - isig - k];
subscript.findAndDelete(new Script().add(bufSig));
}
fSuccess = true;
while (fSuccess && nSigsCount > 0) {
// valtype& vchSig = stacktop(-isig);
bufSig = this.stack[this.stack.length - isig];
// valtype& vchPubKey = stacktop(-ikey);
bufPubkey = this.stack[this.stack.length - ikey];
if (!this.checkSignatureEncoding(bufSig) || !this.checkPubkeyEncoding(bufPubkey)) {
return false;
}
var fOk;
try {
sig = Signature.fromTxFormat(bufSig);
pubkey = PublicKey.fromBuffer(bufPubkey, false);
fOk = this.tx.verifySignature(sig, pubkey, this.nin, subscript);
} catch (e) {
//invalid sig or pubkey
fOk = false;
}
if (fOk) {
isig++;
nSigsCount--;
}
ikey++;
nKeysCount--;
// If there are more signatures left than keys left,
// then too many signatures have failed
if (nSigsCount > nKeysCount) {
fSuccess = false;
}
}
// Clean up stack of actual arguments
while (i-- > 1) {
this.stack.pop();
}
// A bug causes CHECKMULTISIG to consume one extra argument
// whose contents were not checked in any way.
//
// Unfortunately this is a potential source of mutability,
// so optionally verify it is exactly equal to zero prior
// to removing it from the stack.
if (this.stack.length < 1) {
this.errstr = 'SCRIPT_ERR_INVALID_STACK_OPERATION';
return false;
}
if ((this.flags & Interpreter.SCRIPT_VERIFY_NULLDUMMY) && this.stack[this.stack.length - 1].length) {
this.errstr = 'SCRIPT_ERR_SIG_NULLDUMMY';
return false;
}
this.stack.pop();
this.stack.push(fSuccess ? Interpreter.true : Interpreter.false);
if (opcodenum === Opcode.OP_CHECKMULTISIGVERIFY) {
if (fSuccess) {
this.stack.pop();
} else {
this.errstr = 'SCRIPT_ERR_CHECKMULTISIGVERIFY';
return false;
}
}
}
break;
default:
this.errstr = 'SCRIPT_ERR_BAD_OPCODE';
return false;
}
}
return true;
};
| 1 | 14,460 | I would do `new BN(Interpreter.LOCKTIME_THRESHOLD)` | bitpay-bitcore | js |
@@ -23,9 +23,7 @@ package com.github.javaparser.ast.visitor;
import com.github.javaparser.ast.Node;
-import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.Queue;
+import java.util.*;
/**
* Iterate over all the nodes in (a part of) the AST. | 1 | /*
* Copyright (C) 2007-2010 Júlio Vilmar Gesser.
* Copyright (C) 2011, 2013-2016 The JavaParser Team.
*
* This file is part of JavaParser.
*
* JavaParser can be used either under the terms of
* a) the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* b) the terms of the Apache License
*
* You should have received a copy of both licenses in LICENCE.LGPL and
* LICENCE.APACHE. Please refer to those files for details.
*
* JavaParser is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
package com.github.javaparser.ast.visitor;
import com.github.javaparser.ast.Node;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.Queue;
/**
* Iterate over all the nodes in (a part of) the AST.
*/
public abstract class TreeVisitor {
public void visitLeavesFirst(Node node) {
for (Node child : node.getChildNodes()) {
visitLeavesFirst(child);
}
process(node);
}
/**
* Performs a pre-order node traversal starting with a given node. When each node is visited,
* {@link #process(Node)} is called for further processing.
*
* @param node The node at which the traversal begins.
*
* @see <a href="https://en.wikipedia.org/wiki/Pre-order">Pre-order traversal</a>
*/
public void visitPreOrder(Node node) {
process(node);
new ArrayList<>(node.getChildNodes()).forEach(this::visitPreOrder);
}
/**
* Performs a post-order node traversal starting with a given node. When each node is visited,
* {@link #process(Node)} is called for further processing.
*
* @param node The node at which the traversal begins.
*
* @see <a href="https://en.wikipedia.org/wiki/Post-order">Post-order traversal</a>
*/
public void visitPostOrder(Node node) {
new ArrayList<>(node.getChildNodes()).forEach(this::visitPostOrder);
process(node);
}
/**
* Performs a pre-order node traversal starting with a given node. When each node is visited,
* {@link #process(Node)} is called for further processing.
*
* @deprecated As of release 3.1.0, replaced by {@link #visitPreOrder(Node)}
*
* @param node The node at which the traversal begins.
*
* @see <a href="https://en.wikipedia.org/wiki/Pre-order">Pre-order traversal</a>
*/
@Deprecated
public void visitDepthFirst(Node node) {
visitPreOrder(node);
}
/**
* Performs a breadth-first (level-order) node traversal starting with a given node. When each
* node is visited, {@link #process(Node)} is called for further processing.
*
* @param node the start node, and the first one that is passed to process(node).
*
* @see <a href="https://en.wikipedia.org/wiki/Breadth-first_search">Breadth-first search</a>
*/
public void visitBreadthFirst(Node node) {
final Queue<Node> queue = new LinkedList<>();
queue.offer(node);
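// Level-order walk: the head's children are queued before the head itself is processed.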
while (!queue.isEmpty()) {
final Node head = queue.peek();
for (Node child : head.getChildNodes()) {
queue.offer(child);
}
process(queue.poll());
}
}
/**
* Process the given node.
*
* @param node The current node to process.
*/
public abstract void process(Node node);
}
| 1 | 11,782 | @ftomassetti I turned the tree walking algorithms into iterators so you don't have to visit every node when you only wanted a few, like for a `findFirst`. | javaparser-javaparser | java |
@@ -416,7 +416,7 @@ func timerTypeFromThrift(
func timerTypeToReason(
timerType timerType,
) string {
- return fmt.Sprintf("cadenceInternal:Timeout %v", timerTypeToThrift(timerType))
+ return fmt.Sprintf("cadenceInternal:Timeout TimeoutType%v", timerTypeToThrift(timerType))
}
// Len implements sort.Interface | 1 | // Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination timerSequence_mock.go
package history
import (
"fmt"
"sort"
"time"
"github.com/temporalio/temporal/.gen/go/shared"
"github.com/temporalio/temporal/common"
"github.com/temporalio/temporal/common/clock"
"github.com/temporalio/temporal/common/persistence"
)
type timerType int32
const (
timerTypeStartToClose = timerType(shared.TimeoutTypeStartToClose)
timerTypeScheduleToStart = timerType(shared.TimeoutTypeScheduleToStart)
timerTypeScheduleToClose = timerType(shared.TimeoutTypeScheduleToClose)
timerTypeHeartbeat = timerType(shared.TimeoutTypeHeartbeat)
)
const (
// activity / user timer task not created
timerTaskStatusNone = iota
timerTaskStatusCreated
)
const (
// activity timer task status
timerTaskStatusCreatedStartToClose = 1 << iota
timerTaskStatusCreatedScheduleToStart
timerTaskStatusCreatedScheduleToClose
timerTaskStatusCreatedHeartbeat
)
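// For example (illustrative), an activity whose start-to-close and heartbeat
// timer tasks have both been created would carry
// TimerTaskStatus == timerTaskStatusCreatedStartToClose|timerTaskStatusCreatedHeartbeat.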
type (
// timerSequenceID
timerSequenceID struct {
eventID int64
timestamp time.Time
timerType timerType
timerCreated bool
attempt int32
}
timerSequenceIDs []timerSequenceID
timerSequence interface {
isExpired(referenceTime time.Time, timerSequenceID timerSequenceID) bool
createNextUserTimer() (bool, error)
createNextActivityTimer() (bool, error)
loadAndSortUserTimers() []timerSequenceID
loadAndSortActivityTimers() []timerSequenceID
}
timerSequenceImpl struct {
timeSource clock.TimeSource
mutableState mutableState
}
)
var _ timerSequence = (*timerSequenceImpl)(nil)
func newTimerSequence(
timeSource clock.TimeSource,
mutableState mutableState,
) *timerSequenceImpl {
return &timerSequenceImpl{
timeSource: timeSource,
mutableState: mutableState,
}
}
func (t *timerSequenceImpl) isExpired(
referenceTime time.Time,
timerSequenceID timerSequenceID,
) bool {
// Cassandra timestamp resolution is in milliseconds,
// so do the comparison at second resolution here.
return timerSequenceID.timestamp.Unix() <= referenceTime.Unix()
}
func (t *timerSequenceImpl) createNextUserTimer() (bool, error) {
sequenceIDs := t.loadAndSortUserTimers()
if len(sequenceIDs) == 0 {
return false, nil
}
firstTimerTask := sequenceIDs[0]
// timer has already been created
if firstTimerTask.timerCreated {
return false, nil
}
timerInfo, ok := t.mutableState.GetUserTimerInfoByEventID(firstTimerTask.eventID)
if !ok {
return false, &shared.InternalServiceError{
Message: fmt.Sprintf("unable to load timer info %v", firstTimerTask.eventID),
}
}
// mark the timer task status as an indication that the timer task has been generated
// NOTE: TaskStatus is a misleading attribute name; it effectively acts as a
// "timer task created" flag
timerInfo.TaskStatus = timerTaskStatusCreated
if err := t.mutableState.UpdateUserTimer(timerInfo); err != nil {
return false, err
}
t.mutableState.AddTimerTasks(&persistence.UserTimerTask{
// TaskID is set by shard
VisibilityTimestamp: firstTimerTask.timestamp,
EventID: firstTimerTask.eventID,
Version: t.mutableState.GetCurrentVersion(),
})
return true, nil
}
func (t *timerSequenceImpl) createNextActivityTimer() (bool, error) {
sequenceIDs := t.loadAndSortActivityTimers()
if len(sequenceIDs) == 0 {
return false, nil
}
firstTimerTask := sequenceIDs[0]
// timer has already been created
if firstTimerTask.timerCreated {
return false, nil
}
activityInfo, ok := t.mutableState.GetActivityInfo(firstTimerTask.eventID)
if !ok {
return false, &shared.InternalServiceError{
Message: fmt.Sprintf("unable to load activity info %v", firstTimerTask.eventID),
}
}
// mark timer task mask as indication that timer task is generated
activityInfo.TimerTaskStatus |= timerTypeToTimerMask(firstTimerTask.timerType)
if err := t.mutableState.UpdateActivity(activityInfo); err != nil {
return false, err
}
t.mutableState.AddTimerTasks(&persistence.ActivityTimeoutTask{
// TaskID is set by shard
VisibilityTimestamp: firstTimerTask.timestamp,
TimeoutType: int(firstTimerTask.timerType),
EventID: firstTimerTask.eventID,
Attempt: int64(firstTimerTask.attempt),
Version: t.mutableState.GetCurrentVersion(),
})
return true, nil
}
func (t *timerSequenceImpl) loadAndSortUserTimers() []timerSequenceID {
pendingTimers := t.mutableState.GetPendingTimerInfos()
timers := make(timerSequenceIDs, 0, len(pendingTimers))
for _, timerInfo := range pendingTimers {
if sequenceID := t.getUserTimerTimeout(
timerInfo,
); sequenceID != nil {
timers = append(timers, *sequenceID)
}
}
sort.Sort(timers)
return timers
}
func (t *timerSequenceImpl) loadAndSortActivityTimers() []timerSequenceID {
// there can be up to 4 timers per activity
// see timerType
pendingActivities := t.mutableState.GetPendingActivityInfos()
activityTimers := make(timerSequenceIDs, 0, len(pendingActivities)*4)
for _, activityInfo := range pendingActivities {
if sequenceID := t.getActivityScheduleToCloseTimeout(
activityInfo,
); sequenceID != nil {
activityTimers = append(activityTimers, *sequenceID)
}
if sequenceID := t.getActivityScheduleToStartTimeout(
activityInfo,
); sequenceID != nil {
activityTimers = append(activityTimers, *sequenceID)
}
if sequenceID := t.getActivityStartToCloseTimeout(
activityInfo,
); sequenceID != nil {
activityTimers = append(activityTimers, *sequenceID)
}
if sequenceID := t.getActivityHeartbeatTimeout(
activityInfo,
); sequenceID != nil {
activityTimers = append(activityTimers, *sequenceID)
}
}
sort.Sort(activityTimers)
return activityTimers
}
func (t *timerSequenceImpl) getUserTimerTimeout(
timerInfo *persistence.TimerInfo,
) *timerSequenceID {
return &timerSequenceID{
eventID: timerInfo.StartedID,
timestamp: timerInfo.ExpiryTime,
timerType: timerTypeStartToClose,
timerCreated: timerInfo.TaskStatus == timerTaskStatusCreated,
attempt: 0,
}
}
func (t *timerSequenceImpl) getActivityScheduleToStartTimeout(
activityInfo *persistence.ActivityInfo,
) *timerSequenceID {
// activity is not scheduled yet, probably due to retry & backoff
if activityInfo.ScheduleID == common.EmptyEventID {
return nil
}
// activity is already started
if activityInfo.StartedID != common.EmptyEventID {
return nil
}
startTimeout := activityInfo.ScheduledTime.Add(
time.Duration(activityInfo.ScheduleToStartTimeout) * time.Second,
)
return &timerSequenceID{
eventID: activityInfo.ScheduleID,
timestamp: startTimeout,
timerType: timerTypeScheduleToStart,
timerCreated: (activityInfo.TimerTaskStatus & timerTaskStatusCreatedScheduleToStart) > 0,
attempt: activityInfo.Attempt,
}
}
func (t *timerSequenceImpl) getActivityScheduleToCloseTimeout(
activityInfo *persistence.ActivityInfo,
) *timerSequenceID {
// activity is not scheduled yet, probably due to retry & backoff
if activityInfo.ScheduleID == common.EmptyEventID {
return nil
}
closeTimeout := activityInfo.ScheduledTime.Add(
time.Duration(activityInfo.ScheduleToCloseTimeout) * time.Second,
)
return &timerSequenceID{
eventID: activityInfo.ScheduleID,
timestamp: closeTimeout,
timerType: timerTypeScheduleToClose,
timerCreated: (activityInfo.TimerTaskStatus & timerTaskStatusCreatedScheduleToClose) > 0,
attempt: activityInfo.Attempt,
}
}
func (t *timerSequenceImpl) getActivityStartToCloseTimeout(
activityInfo *persistence.ActivityInfo,
) *timerSequenceID {
// activity is not scheduled yet, probably due to retry & backoff
if activityInfo.ScheduleID == common.EmptyEventID {
return nil
}
// activity is not started yet
if activityInfo.StartedID == common.EmptyEventID {
return nil
}
closeTimeout := activityInfo.StartedTime.Add(
time.Duration(activityInfo.StartToCloseTimeout) * time.Second,
)
return &timerSequenceID{
eventID: activityInfo.ScheduleID,
timestamp: closeTimeout,
timerType: timerTypeStartToClose,
timerCreated: (activityInfo.TimerTaskStatus & timerTaskStatusCreatedStartToClose) > 0,
attempt: activityInfo.Attempt,
}
}
func (t *timerSequenceImpl) getActivityHeartbeatTimeout(
activityInfo *persistence.ActivityInfo,
) *timerSequenceID {
// activity is not scheduled yet, probably due to retry & backoff
if activityInfo.ScheduleID == common.EmptyEventID {
return nil
}
// activity is not started yet
if activityInfo.StartedID == common.EmptyEventID {
return nil
}
// no heartbeat timeout configured
if activityInfo.HeartbeatTimeout <= 0 {
return nil
}
// use the latest time as last heartbeat time
lastHeartbeat := activityInfo.StartedTime
if activityInfo.LastHeartBeatUpdatedTime.After(lastHeartbeat) {
lastHeartbeat = activityInfo.LastHeartBeatUpdatedTime
}
heartbeatTimeout := lastHeartbeat.Add(
time.Duration(activityInfo.HeartbeatTimeout) * time.Second,
)
return &timerSequenceID{
eventID: activityInfo.ScheduleID,
timestamp: heartbeatTimeout,
timerType: timerTypeHeartbeat,
timerCreated: (activityInfo.TimerTaskStatus & timerTaskStatusCreatedHeartbeat) > 0,
attempt: activityInfo.Attempt,
}
}
func timerTypeToTimerMask(
timerType timerType,
) int32 {
switch timerType {
case timerTypeStartToClose:
return timerTaskStatusCreatedStartToClose
case timerTypeScheduleToStart:
return timerTaskStatusCreatedScheduleToStart
case timerTypeScheduleToClose:
return timerTaskStatusCreatedScheduleToClose
case timerTypeHeartbeat:
return timerTaskStatusCreatedHeartbeat
default:
panic("invalid timeout type")
}
}
func timerTypeToThrift(
timerType timerType,
) shared.TimeoutType {
switch timerType {
case timerTypeStartToClose:
return shared.TimeoutTypeStartToClose
case timerTypeScheduleToStart:
return shared.TimeoutTypeScheduleToStart
case timerTypeScheduleToClose:
return shared.TimeoutTypeScheduleToClose
case timerTypeHeartbeat:
return shared.TimeoutTypeHeartbeat
default:
panic(fmt.Sprintf("invalid timer type: %v", timerType))
}
}
func timerTypeFromThrift(
timerType shared.TimeoutType,
) timerType {
switch timerType {
case shared.TimeoutTypeStartToClose:
return timerTypeStartToClose
case shared.TimeoutTypeScheduleToStart:
return timerTypeScheduleToStart
case shared.TimeoutTypeScheduleToClose:
return timerTypeScheduleToClose
case shared.TimeoutTypeHeartbeat:
return timerTypeHeartbeat
default:
panic(fmt.Sprintf("invalid timeout type: %v", timerType))
}
}
func timerTypeToReason(
timerType timerType,
) string {
return fmt.Sprintf("cadenceInternal:Timeout %v", timerTypeToThrift(timerType))
}
// Len implements sort.Interface
func (s timerSequenceIDs) Len() int {
return len(s)
}
// Swap implements sort.Interface.
func (s timerSequenceIDs) Swap(
this int,
that int,
) {
s[this], s[that] = s[that], s[this]
}
// Less implements sort.Interface
func (s timerSequenceIDs) Less(
this int,
that int,
) bool {
thisSequenceID := s[this]
thatSequenceID := s[that]
// order: timeout time, event ID, timeout type
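// e.g. two timers expiring at the same instant are ordered by event ID,
// and identical event IDs fall back to the timer type value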
if thisSequenceID.timestamp.Before(thatSequenceID.timestamp) {
return true
} else if thisSequenceID.timestamp.After(thatSequenceID.timestamp) {
return false
}
// timeout time are the same
if thisSequenceID.eventID < thatSequenceID.eventID {
return true
} else if thisSequenceID.eventID > thatSequenceID.eventID {
return false
}
// timeout time & event ID are the same
if thisSequenceID.timerType < thatSequenceID.timerType {
return true
} else if thisSequenceID.timerType > thatSequenceID.timerType {
return false
}
// thisSequenceID and thatSequenceID compare equal; Less must describe a
// strict ordering, so equal elements are not "less"
return false
}
| 1 | 9,141 | Need to add type name here to be compatible with proto string representation. | temporalio-temporal | go |
@@ -40,7 +40,7 @@ public class BazelBuild {
if (target == null || "".equals(target)) {
throw new IllegalStateException("No targets specified");
}
- log.info("\nBuilding " + target + " ...");
+ log.finest("\nBuilding " + target + " ...");
ImmutableList.Builder<String> builder = ImmutableList.builder();
builder.add("bazel", "build", target); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.build;
import com.google.common.collect.ImmutableList;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.os.CommandLine;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.logging.Logger;
public class BazelBuild {
private static final Logger log = Logger.getLogger(BazelBuild.class.getName());
public void build(String target) {
Path projectRoot = InProject.findProjectRoot();
if (!Files.exists(projectRoot.resolve("Rakefile"))) {
// we're not in dev mode
return;
}
if (target == null || "".equals(target)) {
throw new IllegalStateException("No targets specified");
}
log.info("\nBuilding " + target + " ...");
ImmutableList.Builder<String> builder = ImmutableList.builder();
builder.add("bazel", "build", target);
ImmutableList<String> command = builder.build();
CommandLine commandLine = new CommandLine(command.toArray(new String[0]));
commandLine.setWorkingDirectory(projectRoot.toAbsolutePath().toString());
commandLine.copyOutputTo(System.err);
commandLine.execute();
if (!commandLine.isSuccessful()) {
throw new WebDriverException("Build failed! " + target + "\n" + commandLine.getStdOut());
}
}
}
| 1 | 17,120 | This is to let people know that the tooling is doing something during a build. Please leave. | SeleniumHQ-selenium | py |
@@ -17,8 +17,8 @@ MINIMALIST_COLLECTION = {'data': dict()}
MINIMALIST_GROUP = {'data': dict(members=['fxa:user'])}
MINIMALIST_RECORD = {'data': dict(name="Hulled Barley",
type="Whole Grain")}
-USER_PRINCIPAL = 'basicauth:8a931a10fc88ab2f6d1cc02a07d3a81b5d4768f' \
- '6f13e85c5d8d4180419acb1b4'
+USER_PRINCIPAL = 'basicauth:aaedca130273574dd2bd6c3acad57f3545b662a974fa4320' \
+ '236f25fe474676d6'
class BaseWebTest(object): | 1 | try:
import unittest2 as unittest
except ImportError:
import unittest # NOQA
import webtest
from cliquet import utils
from pyramid.security import IAuthorizationPolicy
from zope.interface import implementer
from cliquet.tests import support as cliquet_support
from kinto import main as testapp
from kinto import DEFAULT_SETTINGS
MINIMALIST_BUCKET = {'data': dict()}
MINIMALIST_COLLECTION = {'data': dict()}
MINIMALIST_GROUP = {'data': dict(members=['fxa:user'])}
MINIMALIST_RECORD = {'data': dict(name="Hulled Barley",
type="Whole Grain")}
USER_PRINCIPAL = 'basicauth:8a931a10fc88ab2f6d1cc02a07d3a81b5d4768f' \
'6f13e85c5d8d4180419acb1b4'
class BaseWebTest(object):
def __init__(self, *args, **kwargs):
super(BaseWebTest, self).__init__(*args, **kwargs)
self.principal = USER_PRINCIPAL
self.app = self._get_test_app()
self.storage = self.app.app.registry.storage
self.permission = self.app.app.registry.permission
self.permission.initialize_schema()
self.storage.initialize_schema()
self.headers = {
'Content-Type': 'application/json',
}
self.headers.update(get_user_headers('mat'))
def _get_test_app(self, settings=None):
app = webtest.TestApp(testapp({}, **self.get_app_settings(settings)))
app.RequestClass = cliquet_support.get_request_class(prefix="v1")
return app
def get_app_settings(self, additional_settings=None):
settings = cliquet_support.DEFAULT_SETTINGS.copy()
settings.update(**DEFAULT_SETTINGS)
settings['cliquet.cache_backend'] = 'cliquet.cache.memory'
settings['cliquet.storage_backend'] = 'cliquet.storage.memory'
settings['cliquet.permission_backend'] = 'cliquet.permission.memory'
settings['cliquet.project_name'] = 'cloud storage'
settings['cliquet.project_docs'] = 'https://kinto.rtfd.org/'
settings['multiauth.authorization_policy'] = (
'kinto.tests.support.AllowAuthorizationPolicy')
if additional_settings is not None:
settings.update(additional_settings)
return settings
def tearDown(self):
super(BaseWebTest, self).tearDown()
self.storage.flush()
self.permission.flush()
def create_group(self, bucket_id, group_id, members=None):
if members is None:
group = MINIMALIST_GROUP
else:
group = {'data': {'members': members}}
group_url = '/buckets/%s/groups/%s' % (bucket_id, group_id)
self.app.put_json(group_url, group,
headers=self.headers, status=201)
def create_bucket(self, bucket_id):
self.app.put_json('/buckets/%s' % bucket_id, MINIMALIST_BUCKET,
headers=self.headers, status=201)
@implementer(IAuthorizationPolicy)
class AllowAuthorizationPolicy(object):
def permits(self, context, principals, permission):
if USER_PRINCIPAL in principals:
return True
return False
def principals_allowed_by_permission(self, context, permission):
raise NotImplementedError()  # pragma: no cover
def get_user_headers(user):
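"""Build Basic auth headers for the given user (password is "secret").

For example (illustrative), get_user_headers('mat') returns
{'Authorization': 'Basic bWF0OnNlY3JldA=='}.
"""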
credentials = "%s:secret" % user
authorization = 'Basic {0}'.format(utils.encode64(credentials))
return {
'Authorization': authorization
}
| 1 | 7,567 | Why is the principal changing here? | Kinto-kinto | py |
@@ -26,6 +26,11 @@
// The default URL opener will use credentials from the environment variables
// AZURE_STORAGE_ACCOUNT, AZURE_STORAGE_KEY, and AZURE_STORAGE_SAS_TOKEN.
// AZURE_STORAGE_ACCOUNT is required, along with one of the other two.
+// AZURE_BLOB_DOMAIN can optionally be used to specify the Azure Environment
+// blob storage domain. If AZURE_BLOB_DOMAIN is not provided, the default
+// Azure public domain "blob.core.windows.net" is used. Check the Azure
+// Developer Guide for your particular cloud environment to determine the
+// proper blob storage domain name.
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information. | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package azureblob provides a blob implementation that uses Azure Storage’s
// BlockBlob. Use OpenBucket to construct a *blob.Bucket.
//
// NOTE: SignedURLs for PUT created with this package are not fully portable;
// they will not work unless the PUT request includes a "x-ms-blob-type" header
// set to "BlockBlob".
// See https://stackoverflow.com/questions/37824136/put-on-sas-blob-url-without-specifying-x-ms-blob-type-header.
//
// URLs
//
// For blob.OpenBucket, azureblob registers for the scheme "azblob".
// The default URL opener will use credentials from the environment variables
// AZURE_STORAGE_ACCOUNT, AZURE_STORAGE_KEY, and AZURE_STORAGE_SAS_TOKEN.
// AZURE_STORAGE_ACCOUNT is required, along with one of the other two.
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
// Escaping
//
// Go CDK supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for azureblob:
// - Blob keys: ASCII characters 0-31, 92 ("\"), and 127 are escaped to
// "__0x<hex>__". Additionally, the "/" in "../" and a trailing "/" in a
// key (e.g., "foo/") are escaped in the same way.
// - Metadata keys: Per https://docs.microsoft.com/en-us/azure/storage/blobs/storage-properties-metadata,
// Azure only allows C# identifiers as metadata keys. Therefore, characters
//   other than "[a-z][A-Z][0-9]_" are escaped using "__0x<hex>__". In addition,
// characters "[0-9]" are escaped when they start the string.
// URL encoding would not work since "%" is not valid.
// - Metadata values: Escaped using URL encoding.
//
// As
//
// azureblob exposes the following types for As:
// - Bucket: *azblob.ContainerURL
// - Error: azblob.StorageError
// - ListObject: azblob.BlobItem for objects, azblob.BlobPrefix for "directories"
// - ListOptions.BeforeList: *azblob.ListBlobsSegmentOptions
// - Reader: azblob.DownloadResponse
// - Reader.BeforeRead: *azblob.BlockBlobURL, *azblob.BlobAccessConditions
// - Attributes: azblob.BlobGetPropertiesResponse
// - CopyOptions.BeforeCopy: azblob.Metadata, *azblob.ModifiedAccessConditions, *azblob.BlobAccessConditions
// - WriterOptions.BeforeWrite: *azblob.UploadStreamToBlockBlobOptions
package azureblob
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/google/uuid"
"github.com/google/wire"
"gocloud.dev/blob"
"gocloud.dev/blob/driver"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/escape"
"gocloud.dev/internal/gcerr"
"gocloud.dev/internal/useragent"
)
// Options sets options for constructing a *blob.Bucket backed by Azure Block Blob.
type Options struct {
// Credential represents the authorizer for SignedURL.
// Required to use SignedURL.
Credential azblob.StorageAccountCredential
// SASToken can be provided along with anonymous credentials to use
// delegated privileges.
// See https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1#shared-access-signature-parameters.
SASToken SASToken
}
const (
defaultMaxDownloadRetryRequests = 3 // download retry policy (Azure default is zero)
defaultPageSize = 1000 // default page size for ListPaged (Azure default is 5000)
defaultUploadBuffers = 5 // configure the number of rotating buffers that are used when uploading (for degree of parallelism)
defaultUploadBlockSize = 8 * 1024 * 1024 // configure the upload buffer size
)
func init() {
blob.DefaultURLMux().RegisterBucket(Scheme, new(lazyCredsOpener))
}
// Set holds Wire providers for this package.
var Set = wire.NewSet(
NewPipeline,
wire.Struct(new(Options), "Credential", "SASToken"),
wire.Struct(new(URLOpener), "AccountName", "Pipeline", "Options"),
)
// lazyCredsOpener obtains credentials from the environment on the first call
// to OpenBucketURL.
type lazyCredsOpener struct {
init sync.Once
opener *URLOpener
err error
}
func (o *lazyCredsOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
o.init.Do(func() {
// Use default credential info from the environment.
// Ignore errors, as we'll get errors from OpenBucket later.
accountName, _ := DefaultAccountName()
accountKey, _ := DefaultAccountKey()
sasToken, _ := DefaultSASToken()
o.opener, o.err = openerFromEnv(accountName, accountKey, sasToken)
})
if o.err != nil {
return nil, fmt.Errorf("open bucket %v: %v", u, o.err)
}
return o.opener.OpenBucketURL(ctx, u)
}
// Scheme is the URL scheme azureblob registers its URLOpener under on
// blob.DefaultMux.
const Scheme = "azblob"
// URLOpener opens Azure URLs like "azblob://mybucket".
//
// The URL host is used as the bucket name.
//
// No query parameters are supported.
type URLOpener struct {
// AccountName must be specified.
AccountName AccountName
// Pipeline must be set to a non-nil value.
Pipeline pipeline.Pipeline
// Options specifies the options to pass to OpenBucket.
Options Options
}
func openerFromEnv(accountName AccountName, accountKey AccountKey, sasToken SASToken) (*URLOpener, error) {
// azblob.Credential is an interface; we will use either a SharedKeyCredential
// or anonymous credentials. If the former, we will also fill in
// Options.Credential so that SignedURL will work.
var credential azblob.Credential
var storageAccountCredential azblob.StorageAccountCredential
if accountKey != "" {
sharedKeyCred, err := NewCredential(accountName, accountKey)
if err != nil {
return nil, fmt.Errorf("invalid credentials %s/%s: %v", accountName, accountKey, err)
}
credential = sharedKeyCred
storageAccountCredential = sharedKeyCred
} else {
credential = azblob.NewAnonymousCredential()
}
return &URLOpener{
AccountName: accountName,
Pipeline: NewPipeline(credential, azblob.PipelineOptions{}),
Options: Options{
Credential: storageAccountCredential,
SASToken: sasToken,
},
}, nil
}
// OpenBucketURL opens a blob.Bucket based on u.
func (o *URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
for k := range u.Query() {
return nil, fmt.Errorf("open bucket %v: invalid query parameter %q", u, k)
}
return OpenBucket(ctx, o.Pipeline, o.AccountName, u.Host, &o.Options)
}
// DefaultIdentity is a Wire provider set that provides an Azure storage
// account name, key, and SharedKeyCredential from environment variables.
var DefaultIdentity = wire.NewSet(
DefaultAccountName,
DefaultAccountKey,
NewCredential,
wire.Bind(new(azblob.Credential), new(*azblob.SharedKeyCredential)),
wire.Value(azblob.PipelineOptions{}),
)
// SASTokenIdentity is a Wire provider set that provides an Azure storage
// account name, SASToken, and anonymous credential from environment variables.
var SASTokenIdentity = wire.NewSet(
DefaultAccountName,
DefaultSASToken,
azblob.NewAnonymousCredential,
wire.Value(azblob.PipelineOptions{}),
)
// AccountName is an Azure storage account name.
type AccountName string
// AccountKey is an Azure storage account key (primary or secondary).
type AccountKey string
// SASToken is an Azure shared access signature.
// https://docs.microsoft.com/en-us/azure/storage/common/storage-dotnet-shared-access-signature-part-1
type SASToken string
// DefaultAccountName loads the Azure storage account name from the
// AZURE_STORAGE_ACCOUNT environment variable.
func DefaultAccountName() (AccountName, error) {
s := os.Getenv("AZURE_STORAGE_ACCOUNT")
if s == "" {
return "", errors.New("azureblob: environment variable AZURE_STORAGE_ACCOUNT not set")
}
return AccountName(s), nil
}
// DefaultAccountKey loads the Azure storage account key (primary or secondary)
// from the AZURE_STORAGE_KEY environment variable.
func DefaultAccountKey() (AccountKey, error) {
s := os.Getenv("AZURE_STORAGE_KEY")
if s == "" {
return "", errors.New("azureblob: environment variable AZURE_STORAGE_KEY not set")
}
return AccountKey(s), nil
}
// DefaultSASToken loads an Azure SAS token from the AZURE_STORAGE_SAS_TOKEN
// environment variable.
func DefaultSASToken() (SASToken, error) {
s := os.Getenv("AZURE_STORAGE_SAS_TOKEN")
if s == "" {
return "", errors.New("azureblob: environment variable AZURE_STORAGE_SAS_TOKEN not set")
}
return SASToken(s), nil
}
// NewCredential creates a SharedKeyCredential.
func NewCredential(accountName AccountName, accountKey AccountKey) (*azblob.SharedKeyCredential, error) {
return azblob.NewSharedKeyCredential(string(accountName), string(accountKey))
}
// NewPipeline creates a Pipeline for making HTTP requests to Azure.
func NewPipeline(credential azblob.Credential, opts azblob.PipelineOptions) pipeline.Pipeline {
opts.Telemetry.Value = useragent.AzureUserAgentPrefix("blob") + opts.Telemetry.Value
return azblob.NewPipeline(credential, opts)
}
// bucket represents an Azure Storage Account Container, which handles read,
// write and delete operations on objects within it.
// See https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction.
type bucket struct {
name string
pageMarkers map[string]azblob.Marker
serviceURL *azblob.ServiceURL
containerURL azblob.ContainerURL
opts *Options
}
// OpenBucket returns a *blob.Bucket backed by Azure Storage Account. See the package
// documentation for an example and
// https://godoc.org/github.com/Azure/azure-storage-blob-go/azblob
// for more details.
func OpenBucket(ctx context.Context, pipeline pipeline.Pipeline, accountName AccountName, containerName string, opts *Options) (*blob.Bucket, error) {
b, err := openBucket(ctx, pipeline, accountName, containerName, opts)
if err != nil {
return nil, err
}
return blob.NewBucket(b), nil
}
func openBucket(ctx context.Context, pipeline pipeline.Pipeline, accountName AccountName, containerName string, opts *Options) (*bucket, error) {
if pipeline == nil {
return nil, errors.New("azureblob.OpenBucket: pipeline is required")
}
if accountName == "" {
return nil, errors.New("azureblob.OpenBucket: accountName is required")
}
if containerName == "" {
return nil, errors.New("azureblob.OpenBucket: containerName is required")
}
if opts == nil {
opts = &Options{}
}
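// NOTE: the blob endpoint is hard-coded to the public Azure cloud domain;
// sovereign/government clouds use a different blob storage domain.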
blobURL, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))
if err != nil {
return nil, err
}
if opts.SASToken != "" {
// The Azure portal includes a leading "?" for the SASToken, which we
// don't want here.
blobURL.RawQuery = strings.TrimPrefix(string(opts.SASToken), "?")
}
serviceURL := azblob.NewServiceURL(*blobURL, pipeline)
return &bucket{
name: containerName,
pageMarkers: map[string]azblob.Marker{},
serviceURL: &serviceURL,
containerURL: serviceURL.NewContainerURL(containerName),
opts: opts,
}, nil
}
// Close implements driver.Close.
func (b *bucket) Close() error {
return nil
}
// Copy implements driver.Copy.
func (b *bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *driver.CopyOptions) error {
dstKey = escapeKey(dstKey, false)
dstBlobURL := b.containerURL.NewBlobURL(dstKey)
srcKey = escapeKey(srcKey, false)
srcURL := b.containerURL.NewBlobURL(srcKey).URL()
md := azblob.Metadata{}
mac := azblob.ModifiedAccessConditions{}
bac := azblob.BlobAccessConditions{}
if opts.BeforeCopy != nil {
asFunc := func(i interface{}) bool {
switch v := i.(type) {
case *azblob.Metadata:
*v = md
return true
case **azblob.ModifiedAccessConditions:
*v = &mac
return true
case **azblob.BlobAccessConditions:
*v = &bac
return true
}
return false
}
if err := opts.BeforeCopy(asFunc); err != nil {
return err
}
}
resp, err := dstBlobURL.StartCopyFromURL(ctx, srcURL, md, mac, bac)
if err != nil {
return err
}
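// StartCopyFromURL is asynchronous on the service side, so poll the
// destination blob's properties until the copy leaves the pending state.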
copyStatus := resp.CopyStatus()
nErrors := 0
for copyStatus == azblob.CopyStatusPending {
// Poll until the copy is complete.
time.Sleep(500 * time.Millisecond)
propertiesResp, err := dstBlobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
if err != nil {
// A GetProperties failure may be transient, so allow a couple
// of them before giving up.
nErrors++
if ctx.Err() != nil || nErrors == 3 {
return err
}
}
copyStatus = propertiesResp.CopyStatus()
}
if copyStatus != azblob.CopyStatusSuccess {
return fmt.Errorf("Copy failed with status: %s", copyStatus)
}
return nil
}
// Delete implements driver.Delete.
func (b *bucket) Delete(ctx context.Context, key string) error {
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
_, err := blockBlobURL.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
return err
}
// reader reads an azblob. It implements io.ReadCloser.
type reader struct {
body io.ReadCloser
attrs driver.ReaderAttributes
raw *azblob.DownloadResponse
}
func (r *reader) Read(p []byte) (int, error) {
return r.body.Read(p)
}
func (r *reader) Close() error {
return r.body.Close()
}
func (r *reader) Attributes() *driver.ReaderAttributes {
return &r.attrs
}
func (r *reader) As(i interface{}) bool {
p, ok := i.(*azblob.DownloadResponse)
if !ok {
return false
}
*p = *r.raw
return true
}
// NewRangeReader implements driver.NewRangeReader.
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) {
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
blockBlobURLp := &blockBlobURL
accessConditions := &azblob.BlobAccessConditions{}
end := length
if end < 0 {
end = azblob.CountToEnd
}
if opts.BeforeRead != nil {
asFunc := func(i interface{}) bool {
if p, ok := i.(**azblob.BlockBlobURL); ok {
*p = blockBlobURLp
return true
}
if p, ok := i.(**azblob.BlobAccessConditions); ok {
*p = accessConditions
return true
}
return false
}
if err := opts.BeforeRead(asFunc); err != nil {
return nil, err
}
}
blobDownloadResponse, err := blockBlobURLp.Download(ctx, offset, end, *accessConditions, false)
if err != nil {
return nil, err
}
attrs := driver.ReaderAttributes{
ContentType: blobDownloadResponse.ContentType(),
Size: getSize(blobDownloadResponse.ContentLength(), blobDownloadResponse.ContentRange()),
ModTime: blobDownloadResponse.LastModified(),
}
var body io.ReadCloser
if length == 0 {
body = http.NoBody
} else {
body = blobDownloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: defaultMaxDownloadRetryRequests})
}
return &reader{
body: body,
attrs: attrs,
raw: blobDownloadResponse,
}, nil
}
func getSize(contentLength int64, contentRange string) int64 {
// Default size to ContentLength, but that's incorrect for partial-length reads,
// where ContentLength refers to the size of the returned Body, not the entire
// size of the blob. ContentRange has the full size.
size := contentLength
if contentRange != "" {
// Sample: bytes 10-14/27 (where 27 is the full size).
parts := strings.Split(contentRange, "/")
if len(parts) == 2 {
if i, err := strconv.ParseInt(parts[1], 10, 64); err == nil {
size = i
}
}
}
return size
}
// As implements driver.As.
func (b *bucket) As(i interface{}) bool {
p, ok := i.(**azblob.ContainerURL)
if !ok {
return false
}
*p = &b.containerURL
return true
}
// ErrorAs implements driver.ErrorAs.
func (b *bucket) ErrorAs(err error, i interface{}) bool {
switch v := err.(type) {
case azblob.StorageError:
if p, ok := i.(*azblob.StorageError); ok {
*p = v
return true
}
}
return false
}
func (b *bucket) ErrorCode(err error) gcerrors.ErrorCode {
if code := gcerrors.Code(err); code != gcerrors.Unknown {
return code
}
serr, ok := err.(azblob.StorageError)
switch {
case !ok:
return gcerrors.Unknown
case serr.ServiceCode() == azblob.ServiceCodeBlobNotFound || serr.Response().StatusCode == 404:
// Check both the SDK ServiceCode and the HTTP response code for NotFound.
return gcerrors.NotFound
default:
return gcerrors.Unknown
}
}
// Attributes implements driver.Attributes.
func (b *bucket) Attributes(ctx context.Context, key string) (*driver.Attributes, error) {
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
blobPropertiesResponse, err := blockBlobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
if err != nil {
return nil, err
}
azureMD := blobPropertiesResponse.NewMetadata()
md := make(map[string]string, len(azureMD))
for k, v := range azureMD {
// See the package comments for more details on escaping of metadata
// keys & values.
md[escape.HexUnescape(k)] = escape.URLUnescape(v)
}
return &driver.Attributes{
CacheControl: blobPropertiesResponse.CacheControl(),
ContentDisposition: blobPropertiesResponse.ContentDisposition(),
ContentEncoding: blobPropertiesResponse.ContentEncoding(),
ContentLanguage: blobPropertiesResponse.ContentLanguage(),
ContentType: blobPropertiesResponse.ContentType(),
Size: blobPropertiesResponse.ContentLength(),
MD5: blobPropertiesResponse.ContentMD5(),
ModTime: blobPropertiesResponse.LastModified(),
Metadata: md,
AsFunc: func(i interface{}) bool {
p, ok := i.(*azblob.BlobGetPropertiesResponse)
if !ok {
return false
}
*p = *blobPropertiesResponse
return true
},
}, nil
}
// ListPaged implements driver.ListPaged.
func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) {
pageSize := opts.PageSize
if pageSize == 0 {
pageSize = defaultPageSize
}
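// Continuation state is kept in-memory on this bucket: each opaque page
// token previously handed out maps back to an azblob.Marker.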
marker := azblob.Marker{}
if len(opts.PageToken) > 0 {
if m, ok := b.pageMarkers[string(opts.PageToken)]; ok {
marker = m
}
}
azOpts := azblob.ListBlobsSegmentOptions{
MaxResults: int32(pageSize),
Prefix: escapeKey(opts.Prefix, true),
}
if opts.BeforeList != nil {
asFunc := func(i interface{}) bool {
p, ok := i.(**azblob.ListBlobsSegmentOptions)
if !ok {
return false
}
*p = &azOpts
return true
}
if err := opts.BeforeList(asFunc); err != nil {
return nil, err
}
}
listBlob, err := b.containerURL.ListBlobsHierarchySegment(ctx, marker, escapeKey(opts.Delimiter, true), azOpts)
if err != nil {
return nil, err
}
page := &driver.ListPage{}
page.Objects = []*driver.ListObject{}
for _, blobPrefix := range listBlob.Segment.BlobPrefixes {
page.Objects = append(page.Objects, &driver.ListObject{
Key: unescapeKey(blobPrefix.Name),
Size: 0,
IsDir: true,
AsFunc: func(i interface{}) bool {
p, ok := i.(*azblob.BlobPrefix)
if !ok {
return false
}
*p = blobPrefix
return true
}})
}
for _, blobInfo := range listBlob.Segment.BlobItems {
page.Objects = append(page.Objects, &driver.ListObject{
Key: unescapeKey(blobInfo.Name),
ModTime: blobInfo.Properties.LastModified,
Size: *blobInfo.Properties.ContentLength,
MD5: blobInfo.Properties.ContentMD5,
IsDir: false,
AsFunc: func(i interface{}) bool {
p, ok := i.(*azblob.BlobItem)
if !ok {
return false
}
*p = blobInfo
return true
},
})
}
if listBlob.NextMarker.NotDone() {
token := uuid.New().String()
b.pageMarkers[token] = listBlob.NextMarker
page.NextPageToken = []byte(token)
}
if len(listBlob.Segment.BlobPrefixes) > 0 && len(listBlob.Segment.BlobItems) > 0 {
sort.Slice(page.Objects, func(i, j int) bool {
return page.Objects[i].Key < page.Objects[j].Key
})
}
return page, nil
}
// SignedURL implements driver.SignedURL.
func (b *bucket) SignedURL(ctx context.Context, key string, opts *driver.SignedURLOptions) (string, error) {
if b.opts.Credential == nil {
return "", errors.New("azureblob: to use SignedURL, you must call OpenBucket with a non-nil Options.Credential")
}
if opts.ContentType != "" || opts.EnforceAbsentContentType {
return "", gcerr.New(gcerr.Unimplemented, nil, 1, "azureblob: does not enforce Content-Type on PUT")
}
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
srcBlobParts := azblob.NewBlobURLParts(blockBlobURL.URL())
perms := azblob.BlobSASPermissions{}
switch opts.Method {
case http.MethodGet:
perms.Read = true
case http.MethodPut:
perms.Create = true
perms.Write = true
case http.MethodDelete:
perms.Delete = true
default:
return "", fmt.Errorf("unsupported Method %s", opts.Method)
}
var err error
srcBlobParts.SAS, err = azblob.BlobSASSignatureValues{
Protocol: azblob.SASProtocolHTTPS,
ExpiryTime: time.Now().UTC().Add(opts.Expiry),
ContainerName: b.name,
BlobName: srcBlobParts.BlobName,
Permissions: perms.String(),
}.NewSASQueryParameters(b.opts.Credential)
if err != nil {
return "", err
}
srcBlobURLWithSAS := srcBlobParts.URL()
return srcBlobURLWithSAS.String(), nil
}
type writer struct {
ctx context.Context
blockBlobURL *azblob.BlockBlobURL
uploadOpts *azblob.UploadStreamToBlockBlobOptions
w *io.PipeWriter
donec chan struct{}
err error
}
// escapeKey does all required escaping for UTF-8 strings to work with Azure.
// isPrefix indicates whether the key is a full key, or a prefix/delimiter.
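// For example (illustrative): escapeKey("foo\\bar/", false) yields
// "foo__0x5c__bar__0x2f__", escaping the backslash and the trailing slash.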
func escapeKey(key string, isPrefix bool) string {
return escape.HexEscape(key, func(r []rune, i int) bool {
c := r[i]
switch {
// Azure does not work well with backslashes in blob names.
case c == '\\':
return true
// Azure doesn't handle these characters (determined via experimentation).
case c < 32 || c == 127:
return true
// Escape trailing "/" for full keys, otherwise Azure can't address them
// consistently.
case !isPrefix && i == len(key)-1 && c == '/':
return true
// For "../", escape the trailing slash.
case i > 1 && r[i] == '/' && r[i-1] == '.' && r[i-2] == '.':
return true
}
return false
})
}
// unescapeKey reverses escapeKey.
func unescapeKey(key string) string {
return escape.HexUnescape(key)
}
// NewTypedWriter implements driver.NewTypedWriter.
func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) {
key = escapeKey(key, false)
blockBlobURL := b.containerURL.NewBlockBlobURL(key)
if opts.BufferSize == 0 {
opts.BufferSize = defaultUploadBlockSize
}
md := make(map[string]string, len(opts.Metadata))
for k, v := range opts.Metadata {
// See the package comments for more details on escaping of metadata
// keys & values.
e := escape.HexEscape(k, func(runes []rune, i int) bool {
c := runes[i]
switch {
case i == 0 && c >= '0' && c <= '9':
return true
case escape.IsASCIIAlphanumeric(c):
return false
case c == '_':
return false
}
return true
})
if _, ok := md[e]; ok {
return nil, fmt.Errorf("duplicate keys after escaping: %q => %q", k, e)
}
md[e] = escape.URLEscape(v)
}
uploadOpts := &azblob.UploadStreamToBlockBlobOptions{
BufferSize: opts.BufferSize,
MaxBuffers: defaultUploadBuffers,
Metadata: md,
BlobHTTPHeaders: azblob.BlobHTTPHeaders{
CacheControl: opts.CacheControl,
ContentDisposition: opts.ContentDisposition,
ContentEncoding: opts.ContentEncoding,
ContentLanguage: opts.ContentLanguage,
ContentMD5: opts.ContentMD5,
ContentType: contentType,
},
}
if opts.BeforeWrite != nil {
asFunc := func(i interface{}) bool {
p, ok := i.(**azblob.UploadStreamToBlockBlobOptions)
if !ok {
return false
}
*p = uploadOpts
return true
}
if err := opts.BeforeWrite(asFunc); err != nil {
return nil, err
}
}
return &writer{
ctx: ctx,
blockBlobURL: &blockBlobURL,
uploadOpts: uploadOpts,
donec: make(chan struct{}),
}, nil
}
// Write appends p to w. The caller must call Close on the writer when done writing.
func (w *writer) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
if w.w == nil {
pr, pw := io.Pipe()
w.w = pw
if err := w.open(pr); err != nil {
return 0, err
}
}
return w.w.Write(p)
}
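// open starts the background upload: a goroutine streams from pr (or an
// empty body when pr is nil) to the block blob and closes donec on completion.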
func (w *writer) open(pr *io.PipeReader) error {
go func() {
defer close(w.donec)
var body io.Reader
if pr == nil {
body = http.NoBody
} else {
body = pr
}
_, w.err = azblob.UploadStreamToBlockBlob(w.ctx, body, *w.blockBlobURL, *w.uploadOpts)
if w.err != nil {
if pr != nil {
pr.CloseWithError(w.err)
}
return
}
}()
return nil
}
// Close completes the writer and closes it. Any error occurring during write will
// be returned. If a writer is closed before any Write is called, Close will
// create an empty file at the given key.
func (w *writer) Close() error {
if w.w == nil {
w.open(nil)
} else if err := w.w.Close(); err != nil {
return err
}
<-w.donec
return w.err
}
| 1 | 19,859 | Naming nit: let's use `AZURE_STORAGE_` prefix for consistency with the other ones. | google-go-cloud | go |
@@ -0,0 +1,18 @@
+const Plugin = require('../../core/Plugin')
+
+module.exports = class ProgressBar extends Plugin {
+ constructor (uppy, opts) {
+ super(uppy, opts)
+
+ this.id = opts.id
+ this.type = 'progressindicator'
+ }
+
+ install () {
+ this.opts.onInstall()
+ }
+
+ uninstall () {
+ this.opts.onUninstall()
+ }
+} | 1 | 1 | 10,848 | I'm a bit confused. We have an actual `ProgressBar` Uppy React wrapper component that we are testing. Why do we need a mock for it? | transloadit-uppy | js |
|
@@ -1,6 +1,9 @@
package v1alpha1
import (
+ "errors"
+ "strings"
+
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
| 1 | package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kclient "github.com/openebs/maya/pkg/client/k8s/v1alpha1"
clientset "k8s.io/client-go/kubernetes"
)
// getClientsetFn is a typed function that
// abstracts fetching of clientset
type getClientsetFn func() (clientset *clientset.Clientset, err error)
// listFn is a typed function that abstracts
// listing of pvcs
type listFn func(cli *clientset.Clientset, namespace string, opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error)
// deleteFn is a typed function that abstracts
// deletion of pvcs
type deleteFn func(cli *clientset.Clientset, namespace string, name string, deleteOpts *metav1.DeleteOptions) error
// deleteCollectionFn is a typed function that abstracts
// deletion of a collection of pvcs
type deleteCollectionFn func(cli *clientset.Clientset, namespace string, listOpts metav1.ListOptions, deleteOpts *metav1.DeleteOptions) error
// kubeclient enables kubernetes API operations
// on pvc instance
type kubeclient struct {
// clientset refers to pvc clientset
// that will be responsible to
// make kubernetes API calls
clientset *clientset.Clientset
// namespace holds the namespace on which
// kubeclient has to operate
namespace string
// functions useful during mocking
getClientset getClientsetFn
list listFn
del deleteFn
delCollection deleteCollectionFn
}
// kubeclientBuildOption abstracts creating an
// instance of kubeclient
type kubeclientBuildOption func(*kubeclient)
// withDefaults sets the default options
// of kubeclient instance
func (k *kubeclient) withDefaults() {
if k.getClientset == nil {
k.getClientset = func() (clients *clientset.Clientset, err error) {
config, err := kclient.Config().Get()
if err != nil {
return nil, err
}
return clientset.NewForConfig(config)
}
}
if k.list == nil {
k.list = func(cli *clientset.Clientset, namespace string, opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error) {
return cli.CoreV1().PersistentVolumeClaims(namespace).List(opts)
}
}
if k.del == nil {
k.del = func(cli *clientset.Clientset, namespace string, name string, deleteOpts *metav1.DeleteOptions) error {
return cli.CoreV1().PersistentVolumeClaims(namespace).Delete(name, deleteOpts)
}
}
if k.delCollection == nil {
k.delCollection = func(cli *clientset.Clientset, namespace string, listOpts metav1.ListOptions, deleteOpts *metav1.DeleteOptions) error {
return cli.CoreV1().PersistentVolumeClaims(namespace).DeleteCollection(deleteOpts, listOpts)
}
}
}
// WithNamespace sets the kubernetes client against
// the provided namespace
func WithNamespace(namespace string) kubeclientBuildOption {
return func(k *kubeclient) {
k.namespace = namespace
}
}
// WithClientSet sets the kubernetes client against
// the kubeclient instance
func WithClientSet(c *clientset.Clientset) kubeclientBuildOption {
return func(k *kubeclient) {
k.clientset = c
}
}
// KubeClient returns a new instance of kubeclient meant for
// pvc operations
func KubeClient(opts ...kubeclientBuildOption) *kubeclient {
k := &kubeclient{}
for _, o := range opts {
o(k)
}
k.withDefaults()
return k
}
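// A typical (illustrative) use:
//
//	pvcs, err := KubeClient(WithNamespace("default")).List(metav1.ListOptions{})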
// getClientOrCached returns either a new instance
// of kubernetes client or its cached copy
func (k *kubeclient) getClientOrCached() (*clientset.Clientset, error) {
if k.clientset != nil {
return k.clientset, nil
}
c, err := k.getClientset()
if err != nil {
return nil, err
}
k.clientset = c
return k.clientset, nil
}
// List returns a list of pvc
// instances present in kubernetes cluster
func (k *kubeclient) List(opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error) {
cli, err := k.getClientOrCached()
if err != nil {
return nil, err
}
return k.list(cli, k.namespace, opts)
}
// Delete deletes a pvc instance from the
// kubernetes cluster
func (k *kubeclient) Delete(name string, deleteOpts *metav1.DeleteOptions) error {
cli, err := k.getClientOrCached()
if err != nil {
return err
}
return k.del(cli, k.namespace, name, deleteOpts)
}
// DeleteCollection deletes collection of pvc
// instance from the kubernetes cluster
func (k *kubeclient) DeleteCollection(listOpts metav1.ListOptions, deleteOpts *metav1.DeleteOptions) error {
cli, err := k.getClientOrCached()
if err != nil {
return err
}
return k.delCollection(cli, k.namespace, listOpts, deleteOpts)
}
| 1 | 12,644 | can we use `github.com/pkg/errors`? | openebs-maya | go |
@@ -72,15 +72,15 @@ func Compile(scope Scope, f *semantic.FunctionExpression, in semantic.MonoType)
// inType and mapping any variables to the value in the other record.
// If the input type is not a type variable, it will check to ensure
// that the type in the input matches or it will return an error.
-func substituteTypes(subst map[uint64]semantic.MonoType, inType, in semantic.MonoType) error {
+func substituteTypes(subst map[uint64]semantic.MonoType, inferredType, actualType semantic.MonoType) error {
// If the input isn't a valid type, then don't consider it as
// part of substituting types. We will trust type inference has
// the correct type and that we are just handling a null value
// which isn't represented in type inference.
- if in.Nature() == semantic.Invalid {
+ if actualType.Nature() == semantic.Invalid {
return nil
- } else if inType.Kind() == semantic.Var {
- vn, err := inType.VarNum()
+ } else if inferredType.Kind() == semantic.Var {
+ vn, err := inferredType.VarNum()
if err != nil {
return err
} | 1 | package compiler
import (
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
)
func Compile(scope Scope, f *semantic.FunctionExpression, in semantic.MonoType) (Func, error) {
if scope == nil {
scope = NewScope()
}
if in.Nature() != semantic.Object {
return nil, errors.Newf(codes.Invalid, "function input must be an object @ %v", f.Location())
}
// Retrieve the function argument types and create an object type from them.
fnType := f.TypeOf()
argN, err := fnType.NumArguments()
if err != nil {
return nil, err
}
	// The function expression has a monotype that may have
	// tvars contained within it. We have a realized input type,
	// so we can use that to construct the tvar substitutions:
	// iterate over every argument, find the equivalent property
	// inside of the input, and generate the substitutions.
subst := make(map[uint64]semantic.MonoType)
for i := 0; i < argN; i++ {
arg, err := fnType.Argument(i)
if err != nil {
return nil, err
}
name := arg.Name()
argT, err := arg.TypeOf()
if err != nil {
return nil, err
}
prop, ok, err := findProperty(string(name), in)
if err != nil {
return nil, err
} else if ok {
mtyp, err := prop.TypeOf()
if err != nil {
return nil, err
}
if err := substituteTypes(subst, argT, mtyp); err != nil {
return nil, err
}
} else if !arg.Optional() {
return nil, errors.Newf(codes.Invalid, "missing required argument %q", string(name))
}
}
root, err := compile(f.Block, subst, scope)
if err != nil {
return nil, errors.Wrapf(err, codes.Inherit, "cannot compile @ %v", f.Location())
}
return compiledFn{
root: root,
inputScope: nestScope(scope),
}, nil
}
// substituteTypes will generate a substitution map by recursing through
// inType and mapping any variables to the value in the other record.
// If the input type is not a type variable, it will check to ensure
// that the type in the input matches or it will return an error.
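// For example (illustrative), given the inferred type {a: t0} and an input
// record of type {a: int}, the substitution map gains t0 => int.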
func substituteTypes(subst map[uint64]semantic.MonoType, inType, in semantic.MonoType) error {
// If the input isn't a valid type, then don't consider it as
// part of substituting types. We will trust type inference has
// the correct type and that we are just handling a null value
// which isn't represented in type inference.
if in.Nature() == semantic.Invalid {
return nil
} else if inType.Kind() == semantic.Var {
vn, err := inType.VarNum()
if err != nil {
return err
}
// If this substitution variable already exists,
// we need to verify that it maps to the same type
// in the input record.
// We can do this by calling substituteTypes with the same
// input parameter and the substituted monotype since
// substituteTypes will verify the types.
if t, ok := subst[vn]; ok {
return substituteTypes(subst, t, in)
}
// If the input type is not invalid, mark it down
// as the real type.
if in.Nature() != semantic.Invalid {
subst[vn] = in
}
return nil
}
if inType.Kind() != in.Kind() {
return errors.Newf(codes.FailedPrecondition, "type conflict: %s != %s", inType, in)
}
switch inType.Kind() {
case semantic.Basic:
at, err := inType.Basic()
if err != nil {
return err
}
// Otherwise we have a valid type and need to ensure they match.
bt, err := in.Basic()
if err != nil {
return err
}
if at != bt {
return errors.Newf(codes.FailedPrecondition, "type conflict: %s != %s", inType, in)
}
return nil
case semantic.Arr:
lt, err := inType.ElemType()
if err != nil {
return err
}
rt, err := in.ElemType()
if err != nil {
return err
}
return substituteTypes(subst, lt, rt)
case semantic.Record:
// We need to compare the Record type that was inferred
// and the reality. It is ok for Record properties to exist
// in the real type that aren't in the inferred type and
// it is ok for inferred types to be missing from the actual
// input type in the case of null values.
// What isn't ok is that the two types conflict so we are
// going to iterate over all of the properties in the inferred
// type and perform substitutions on them.
nproperties, err := inType.NumProperties()
if err != nil {
return err
}
names := make([]string, 0, nproperties)
for i := 0; i < nproperties; i++ {
lprop, err := inType.RecordProperty(i)
if err != nil {
return err
}
// Record the name of the property in the input type.
name := lprop.Name()
names = append(names, name)
// Find the property in the real type if it
// exists. If it doesn't exist, then no problem!
rprop, ok, err := findProperty(name, in)
if err != nil {
return err
} else if !ok {
// It is ok if this property doesn't exist
// in the input type.
continue
}
ltyp, err := lprop.TypeOf()
if err != nil {
return err
}
rtyp, err := rprop.TypeOf()
if err != nil {
return err
}
if err := substituteTypes(subst, ltyp, rtyp); err != nil {
return err
}
}
// If this object extends another, then find all of the labels
// in the in value that were not referenced by the type.
if withType, ok, err := inType.Extends(); err != nil {
return err
} else if ok {
// Construct the input by filtering any of the names
// that were referenced above. This way, extends only
// includes the unreferenced labels.
nproperties, err := in.NumProperties()
if err != nil {
return err
}
properties := make([]semantic.PropertyType, 0, nproperties)
for i := 0; i < nproperties; i++ {
prop, err := in.RecordProperty(i)
if err != nil {
return err
}
name := prop.Name()
if containsStr(names, name) {
// Already referenced so don't pass this
// to the extends portion.
continue
}
typ, err := prop.TypeOf()
if err != nil {
return err
}
properties = append(properties, semantic.PropertyType{
Key: []byte(name),
Value: typ,
})
}
with := semantic.NewObjectType(properties)
if err := substituteTypes(subst, withType, with); err != nil {
return err
}
}
return nil
case semantic.Fun:
// TODO: https://github.com/influxdata/flux/issues/2587
return errors.New(codes.Unimplemented)
default:
return errors.Newf(codes.Internal, "unknown semantic kind: %s", inType)
}
}
func findProperty(name string, t semantic.MonoType) (*semantic.RecordProperty, bool, error) {
n, err := t.NumProperties()
if err != nil {
return nil, false, err
}
for i := 0; i < n; i++ {
p, err := t.RecordProperty(i)
if err != nil {
return nil, false, err
}
if p.Name() == name {
return p, true, nil
}
}
return nil, false, nil
}
// apply applies a substitution to a type.
// It will ignore any errors when reading a type.
// This is safe becase we already validated that the function type is a monotype.
func apply(sub map[uint64]semantic.MonoType, props []semantic.PropertyType, t semantic.MonoType) semantic.MonoType {
switch t.Kind() {
case semantic.Unknown, semantic.Basic:
// Basic types do not contain type variables.
// As a result there is nothing to substitute.
return t
case semantic.Var:
tv, err := t.VarNum()
if err != nil {
return t
}
ty, ok := sub[tv]
if !ok {
return t
}
return ty
case semantic.Arr:
element, err := t.ElemType()
if err != nil {
return t
}
return semantic.NewArrayType(apply(sub, props, element))
case semantic.Record:
n, err := t.NumProperties()
if err != nil {
return t
}
for i := 0; i < n; i++ {
pr, err := t.RecordProperty(i)
if err != nil {
return t
}
ty, err := pr.TypeOf()
if err != nil {
return t
}
props = append(props, semantic.PropertyType{
Key: []byte(pr.Name()),
Value: apply(sub, nil, ty),
})
}
r, extends, err := t.Extends()
if err != nil {
return t
}
if !extends {
return semantic.NewObjectType(props)
}
r = apply(sub, nil, r)
switch r.Kind() {
case semantic.Record:
return apply(sub, props, r)
case semantic.Var:
tv, err := r.VarNum()
if err != nil {
return t
}
return semantic.ExtendObjectType(props, &tv)
}
case semantic.Fun:
n, err := t.NumArguments()
if err != nil {
return t
}
args := make([]semantic.ArgumentType, n)
for i := 0; i < n; i++ {
arg, err := t.Argument(i)
if err != nil {
return t
}
typ, err := arg.TypeOf()
if err != nil {
return t
}
args[i] = semantic.ArgumentType{
Name: arg.Name(),
Type: apply(sub, nil, typ),
Pipe: arg.Pipe(),
Optional: arg.Optional(),
}
}
retn, err := t.ReturnType()
if err != nil {
return t
}
return semantic.NewFunctionType(apply(sub, nil, retn), args)
}
// If none of the above cases are matched, something has gone
// seriously wrong and we should panic.
panic("unknown type")
}
// compile recursively compiles semantic nodes into evaluators.
func compile(n semantic.Node, subst map[uint64]semantic.MonoType, scope Scope) (Evaluator, error) {
switch n := n.(type) {
case *semantic.Block:
body := make([]Evaluator, len(n.Body))
for i, s := range n.Body {
node, err := compile(s, subst, scope)
if err != nil {
return nil, err
}
body[i] = node
}
return &blockEvaluator{
t: apply(subst, nil, n.ReturnStatement().Argument.TypeOf()),
body: body,
}, nil
case *semantic.ExpressionStatement:
return nil, errors.New(codes.Internal, "statement does nothing, side effects are not supported by the compiler")
case *semantic.ReturnStatement:
node, err := compile(n.Argument, subst, scope)
if err != nil {
return nil, err
}
return returnEvaluator{
Evaluator: node,
}, nil
case *semantic.NativeVariableAssignment:
node, err := compile(n.Init, subst, scope)
if err != nil {
return nil, err
}
return &declarationEvaluator{
t: apply(subst, nil, n.Init.TypeOf()),
id: n.Identifier.Name,
init: node,
}, nil
case *semantic.ObjectExpression:
properties := make(map[string]Evaluator, len(n.Properties))
for _, p := range n.Properties {
node, err := compile(p.Value, subst, scope)
if err != nil {
return nil, err
}
properties[p.Key.Key()] = node
}
var extends *identifierEvaluator
if n.With != nil {
node, err := compile(n.With, subst, scope)
if err != nil {
return nil, err
}
with, ok := node.(*identifierEvaluator)
if !ok {
return nil, errors.New(codes.Internal, "unknown identifier in with expression")
}
extends = with
}
return &objEvaluator{
t: apply(subst, nil, n.TypeOf()),
properties: properties,
with: extends,
}, nil
case *semantic.ArrayExpression:
var elements []Evaluator
if len(n.Elements) > 0 {
elements = make([]Evaluator, len(n.Elements))
for i, e := range n.Elements {
node, err := compile(e, subst, scope)
if err != nil {
return nil, err
}
elements[i] = node
}
}
return &arrayEvaluator{
t: apply(subst, nil, n.TypeOf()),
array: elements,
}, nil
case *semantic.IdentifierExpression:
return &identifierEvaluator{
t: apply(subst, nil, n.TypeOf()),
name: n.Name,
}, nil
case *semantic.MemberExpression:
object, err := compile(n.Object, subst, scope)
if err != nil {
return nil, err
}
return &memberEvaluator{
t: apply(subst, nil, n.TypeOf()),
object: object,
property: n.Property,
}, nil
case *semantic.IndexExpression:
arr, err := compile(n.Array, subst, scope)
if err != nil {
return nil, err
}
idx, err := compile(n.Index, subst, scope)
if err != nil {
return nil, err
}
return &arrayIndexEvaluator{
t: apply(subst, nil, n.TypeOf()),
array: arr,
index: idx,
}, nil
case *semantic.StringExpression:
parts := make([]Evaluator, len(n.Parts))
for i, p := range n.Parts {
e, err := compile(p, subst, scope)
if err != nil {
return nil, err
}
parts[i] = e
}
return &stringExpressionEvaluator{
parts: parts,
}, nil
case *semantic.TextPart:
return &textEvaluator{
value: n.Value,
}, nil
case *semantic.InterpolatedPart:
e, err := compile(n.Expression, subst, scope)
if err != nil {
return nil, err
}
return &interpolatedEvaluator{
s: e,
}, nil
case *semantic.BooleanLiteral:
return &booleanEvaluator{
b: n.Value,
}, nil
case *semantic.IntegerLiteral:
return &integerEvaluator{
i: n.Value,
}, nil
case *semantic.UnsignedIntegerLiteral:
return &unsignedIntegerEvaluator{
i: n.Value,
}, nil
case *semantic.FloatLiteral:
return &floatEvaluator{
f: n.Value,
}, nil
case *semantic.StringLiteral:
return &stringEvaluator{
s: n.Value,
}, nil
case *semantic.RegexpLiteral:
return ®expEvaluator{
r: n.Value,
}, nil
case *semantic.DateTimeLiteral:
return &timeEvaluator{
time: values.ConvertTime(n.Value),
}, nil
case *semantic.DurationLiteral:
v, err := values.FromDurationValues(n.Values)
if err != nil {
return nil, err
}
return &durationEvaluator{
duration: v,
}, nil
case *semantic.UnaryExpression:
node, err := compile(n.Argument, subst, scope)
if err != nil {
return nil, err
}
return &unaryEvaluator{
t: apply(subst, nil, n.TypeOf()),
node: node,
op: n.Operator,
}, nil
case *semantic.LogicalExpression:
l, err := compile(n.Left, subst, scope)
if err != nil {
return nil, err
}
r, err := compile(n.Right, subst, scope)
if err != nil {
return nil, err
}
return &logicalEvaluator{
operator: n.Operator,
left: l,
right: r,
}, nil
case *semantic.ConditionalExpression:
test, err := compile(n.Test, subst, scope)
if err != nil {
return nil, err
}
c, err := compile(n.Consequent, subst, scope)
if err != nil {
return nil, err
}
a, err := compile(n.Alternate, subst, scope)
if err != nil {
return nil, err
}
return &conditionalEvaluator{
test: test,
consequent: c,
alternate: a,
}, nil
case *semantic.BinaryExpression:
l, err := compile(n.Left, subst, scope)
if err != nil {
return nil, err
}
lt := l.Type().Nature()
r, err := compile(n.Right, subst, scope)
if err != nil {
return nil, err
}
rt := r.Type().Nature()
if lt == semantic.Invalid {
lt = rt
} else if rt == semantic.Invalid {
rt = lt
}
f, err := values.LookupBinaryFunction(values.BinaryFuncSignature{
Operator: n.Operator,
Left: lt,
Right: rt,
})
if err != nil {
return nil, err
}
return &binaryEvaluator{
t: apply(subst, nil, n.TypeOf()),
left: l,
right: r,
f: f,
}, nil
case *semantic.CallExpression:
args, err := compile(n.Arguments, subst, scope)
if err != nil {
return nil, err
}
if n.Pipe != nil {
pipeArg, err := n.Callee.TypeOf().PipeArgument()
if err != nil {
return nil, err
}
if pipeArg == nil {
// This should be caught during type inference
return nil, errors.Newf(codes.Internal, "callee lacks a pipe argument, but one was provided")
}
pipe, err := compile(n.Pipe, subst, scope)
if err != nil {
return nil, err
}
args.(*objEvaluator).properties[string(pipeArg.Name())] = pipe
}
callee, err := compile(n.Callee, subst, scope)
if err != nil {
return nil, err
}
return &callEvaluator{
t: apply(subst, nil, n.TypeOf()),
callee: callee,
args: args,
}, nil
case *semantic.FunctionExpression:
fnType := apply(subst, nil, n.TypeOf())
num, err := fnType.NumArguments()
if err != nil {
return nil, err
}
params := make([]functionParam, 0, num)
for i := 0; i < num; i++ {
arg, err := fnType.Argument(i)
if err != nil {
return nil, err
}
k := string(arg.Name())
pt, err := arg.TypeOf()
if err != nil {
return nil, err
}
param := functionParam{
Key: k,
Type: pt,
}
if n.Defaults != nil {
// Search for default value
for _, d := range n.Defaults.Properties {
if d.Key.Key() == k {
d, err := compile(d.Value, subst, scope)
if err != nil {
return nil, err
}
param.Default = d
break
}
}
}
params = append(params, param)
}
return &functionEvaluator{
t: fnType,
params: params,
fn: n,
}, nil
default:
return nil, errors.Newf(codes.Internal, "unknown semantic node of type %T", n)
}
}
func containsStr(strs []string, str string) bool {
for _, s := range strs {
if str == s {
return true
}
}
return false
}
| 1 | 15,181 | I changed the name of the arguments here just to help make the code clearer. | influxdata-flux | go |
@@ -404,7 +404,10 @@ class Callable(param.Parameterized):
allowing their inputs (and in future outputs) to be defined.
This makes it possible to wrap DynamicMaps with streams and
makes it possible to traverse the graph of operations applied
- to a DynamicMap.
+ to a DynamicMap. Additionally a Callable will memoize the last
+ returned value based on the arguments to the function and the
+ state of all streams on its inputs, avoiding calling the function
+ unncessarily.
"""
callable_function = param.Callable(default=lambda x: x, doc=""" | 1 | import itertools
import types
from numbers import Number
from itertools import groupby
from functools import partial
import numpy as np
import param
from . import traversal, util
from .dimension import OrderedDict, Dimension, ViewableElement
from .layout import Layout, AdjointLayout, NdLayout
from .ndmapping import UniformNdMapping, NdMapping, item_check
from .overlay import Overlay, CompositeOverlay, NdOverlay
from .options import Store, StoreOptions
class HoloMap(UniformNdMapping):
"""
A HoloMap can hold any number of DataLayers indexed by a list of
dimension values. It also has a number of properties, which can find
the x- and y-dimension limits and labels.
"""
data_type = (ViewableElement, NdMapping, Layout)
def overlay(self, dimensions=None, **kwargs):
"""
Splits the UniformNdMapping along a specified number of dimensions and
overlays items in the split out Maps.
Shows all HoloMap data When no dimensions are specified.
"""
dimensions = self._valid_dimensions(dimensions)
if len(dimensions) == self.ndims:
with item_check(False):
return NdOverlay(self, **kwargs).reindex(dimensions)
else:
dims = [d for d in self.kdims if d not in dimensions]
return self.groupby(dims, group_type=NdOverlay, **kwargs)
def grid(self, dimensions=None, **kwargs):
"""
GridSpace takes a list of one or two dimensions, and lays out the containing
Views along these axes in a GridSpace.
Shows all HoloMap data When no dimensions are specified.
"""
dimensions = self._valid_dimensions(dimensions)
if len(dimensions) == self.ndims:
with item_check(False):
return GridSpace(self, **kwargs).reindex(dimensions)
return self.groupby(dimensions, container_type=GridSpace, **kwargs)
def layout(self, dimensions=None, **kwargs):
"""
GridSpace takes a list of one or two dimensions, and lays out the containing
Views along these axes in a GridSpace.
Shows all HoloMap data When no dimensions are specified.
"""
dimensions = self._valid_dimensions(dimensions)
if len(dimensions) == self.ndims:
with item_check(False):
return NdLayout(self, **kwargs).reindex(dimensions)
return self.groupby(dimensions, container_type=NdLayout, **kwargs)
def split_overlays(self):
"""
Given a UniformNdMapping of Overlays of N layers, split out the layers into
N separate Maps.
"""
if not issubclass(self.type, CompositeOverlay):
return None, self.clone()
item_maps = OrderedDict()
for k, overlay in self.data.items():
for key, el in overlay.items():
if key not in item_maps:
item_maps[key] = [(k, el)]
else:
item_maps[key].append((k, el))
maps, keys = [], []
for k, layermap in item_maps.items():
maps.append(self.clone(layermap))
keys.append(k)
return keys, maps
def _dimension_keys(self):
"""
Helper for __mul__ that returns the list of keys together with
the dimension labels.
"""
return [tuple(zip([d.name for d in self.kdims], [k] if self.ndims == 1 else k))
for k in self.keys()]
def _dynamic_mul(self, dimensions, other, keys):
"""
Implements dynamic version of overlaying operation overlaying
DynamicMaps and HoloMaps where the key dimensions of one is
a strict superset of the other.
"""
# If either is a HoloMap compute Dimension values
if not isinstance(self, DynamicMap) or not isinstance(other, DynamicMap):
keys = sorted((d, v) for k in keys for d, v in k)
grouped = dict([(g, [v for _, v in group])
for g, group in groupby(keys, lambda x: x[0])])
dimensions = [d(values=grouped[d.name]) for d in dimensions]
mode = 'bounded'
map_obj = None
elif (isinstance(self, DynamicMap) and (other, DynamicMap) and
self.mode != other.mode):
raise ValueError("Cannot overlay DynamicMaps with mismatching mode.")
else:
map_obj = self if isinstance(self, DynamicMap) else other
mode = map_obj.mode
def dynamic_mul(*key, **kwargs):
key = key[0] if mode == 'open' else key
layers = []
try:
if isinstance(self, DynamicMap):
safe_key = () if not self.kdims else key
_, self_el = util.get_dynamic_item(self, dimensions, safe_key)
if self_el is not None:
layers.append(self_el)
else:
layers.append(self[key])
except KeyError:
pass
try:
if isinstance(other, DynamicMap):
safe_key = () if not other.kdims else key
_, other_el = util.get_dynamic_item(other, dimensions, safe_key)
if other_el is not None:
layers.append(other_el)
else:
layers.append(other[key])
except KeyError:
pass
return Overlay(layers)
callback = Callable(callable_function=dynamic_mul, inputs=[self, other])
if map_obj:
return map_obj.clone(callback=callback, shared_data=False,
kdims=dimensions, streams=[])
else:
return DynamicMap(callback=callback, kdims=dimensions)
def __mul__(self, other):
"""
The mul (*) operator implements overlaying of different Views.
This method tries to intelligently overlay Maps with differing
keys. If the UniformNdMapping is mulled with a simple
ViewableElement each element in the UniformNdMapping is
overlaid with the ViewableElement. If the element the
UniformNdMapping is mulled with is another UniformNdMapping it
will try to match up the dimensions, making sure that items
with completely different dimensions aren't overlaid.
"""
if isinstance(other, HoloMap):
self_set = {d.name for d in self.kdims}
other_set = {d.name for d in other.kdims}
# Determine which is the subset, to generate list of keys and
# dimension labels for the new view
self_in_other = self_set.issubset(other_set)
other_in_self = other_set.issubset(self_set)
dimensions = self.kdims
if self_in_other and other_in_self: # superset of each other
keys = self._dimension_keys() + other._dimension_keys()
super_keys = util.unique_iterator(keys)
elif self_in_other: # self is superset
dimensions = other.kdims
super_keys = other._dimension_keys()
elif other_in_self: # self is superset
super_keys = self._dimension_keys()
else: # neither is superset
raise Exception('One set of keys needs to be a strict subset of the other.')
if isinstance(self, DynamicMap) or isinstance(other, DynamicMap):
return self._dynamic_mul(dimensions, other, super_keys)
items = []
for dim_keys in super_keys:
# Generate keys for both subset and superset and sort them by the dimension index.
self_key = tuple(k for p, k in sorted(
[(self.get_dimension_index(dim), v) for dim, v in dim_keys
if dim in self.kdims]))
other_key = tuple(k for p, k in sorted(
[(other.get_dimension_index(dim), v) for dim, v in dim_keys
if dim in other.kdims]))
new_key = self_key if other_in_self else other_key
# Append SheetOverlay of combined items
if (self_key in self) and (other_key in other):
items.append((new_key, self[self_key] * other[other_key]))
elif self_key in self:
items.append((new_key, Overlay([self[self_key]])))
else:
items.append((new_key, Overlay([other[other_key]])))
return self.clone(items, kdims=dimensions, label=self._label, group=self._group)
elif isinstance(other, self.data_type):
if isinstance(self, DynamicMap):
def dynamic_mul(*args, **kwargs):
element = self[args]
return element * other
callback = Callable(callable_function=dynamic_mul,
inputs=[self, other])
return self.clone(shared_data=False, callback=callback,
streams=[])
items = [(k, v * other) for (k, v) in self.data.items()]
return self.clone(items, label=self._label, group=self._group)
else:
raise Exception("Can only overlay with {data} or {vmap}.".format(
data=self.data_type, vmap=self.__class__.__name__))
def __add__(self, obj):
return Layout.from_values(self) + Layout.from_values(obj)
def __lshift__(self, other):
if isinstance(other, (ViewableElement, UniformNdMapping)):
return AdjointLayout([self, other])
elif isinstance(other, AdjointLayout):
return AdjointLayout(other.data+[self])
else:
raise TypeError('Cannot append {0} to a AdjointLayout'.format(type(other).__name__))
def collate(self, merge_type=None, drop=[], drop_constant=False):
"""
Collation allows collapsing nested HoloMaps by merging
their dimensions. In the simple case a HoloMap containing
other HoloMaps can easily be joined in this way. However
collation is particularly useful when the objects being
joined are deeply nested, e.g. you want to join multiple
Layouts recorded at different times, collation will return
one Layout containing HoloMaps indexed by Time. Changing
the merge_type will allow merging the outer Dimension
into any other UniformNdMapping type.
Specific dimensions may be dropped if they are redundant
by supplying them in a list. Enabling drop_constant allows
ignoring any non-varying dimensions during collation.
"""
from .element import Collator
merge_type=merge_type if merge_type else self.__class__
return Collator(self, merge_type=merge_type, drop=drop,
drop_constant=drop_constant)()
def collapse(self, dimensions=None, function=None, spreadfn=None, **kwargs):
"""
Allows collapsing one of any number of key dimensions
on the HoloMap. Homogenous Elements may be collapsed by
supplying a function, inhomogenous elements are merged.
"""
from .operation import MapOperation
if not dimensions:
dimensions = self.kdims
if not isinstance(dimensions, list): dimensions = [dimensions]
if self.ndims > 1 and len(dimensions) != self.ndims:
groups = self.groupby([dim for dim in self.kdims
if dim not in dimensions])
elif all(d in self.kdims for d in dimensions):
groups = HoloMap([(0, self)])
else:
raise KeyError("Supplied dimensions not found.")
collapsed = groups.clone(shared_data=False)
for key, group in groups.items():
if isinstance(function, MapOperation):
collapsed[key] = function(group, **kwargs)
else:
group_data = [el.data for el in group]
args = (group_data, function, group.last.kdims)
if hasattr(group.last, 'interface'):
col_data = group.type(group.table().aggregate(group.last.kdims, function, spreadfn, **kwargs))
else:
data = group.type.collapse_data(*args, **kwargs)
col_data = group.last.clone(data)
collapsed[key] = col_data
return collapsed if self.ndims > 1 else collapsed.last
def sample(self, samples=[], bounds=None, **sample_values):
"""
Sample each Element in the UniformNdMapping by passing either a list of
samples or a tuple specifying the number of regularly spaced
samples per dimension. Alternatively, a single sample may be
requested using dimension-value pairs. Optionally, the bounds
argument can be used to specify the bounding extent from which
the coordinates are to regularly sampled. Regular sampling
assumes homogenous and regularly sampled data.
For 1D sampling, the shape is simply as the desired number of
samples (and not a tuple). The bounds format for 1D sampling
is the tuple (lower, upper) and the tuple (left, bottom,
right, top) for 2D sampling.
"""
dims = self.last.ndims
if isinstance(samples, tuple) or np.isscalar(samples):
if dims == 1:
xlim = self.last.range(0)
lower, upper = (xlim[0], xlim[1]) if bounds is None else bounds
edges = np.linspace(lower, upper, samples+1)
linsamples = [(l+u)/2.0 for l,u in zip(edges[:-1], edges[1:])]
elif dims == 2:
(rows, cols) = samples
if bounds:
(l,b,r,t) = bounds
else:
l, r = self.last.range(0)
b, t = self.last.range(1)
xedges = np.linspace(l, r, cols+1)
yedges = np.linspace(b, t, rows+1)
xsamples = [(lx+ux)/2.0 for lx,ux in zip(xedges[:-1], xedges[1:])]
ysamples = [(ly+uy)/2.0 for ly,uy in zip(yedges[:-1], yedges[1:])]
Y,X = np.meshgrid(ysamples, xsamples)
linsamples = zip(X.flat, Y.flat)
else:
raise NotImplementedError("Regular sampling not implemented "
"for high-dimensional Views.")
samples = list(util.unique_iterator(self.last.closest(linsamples)))
sampled = self.clone([(k, view.sample(samples, **sample_values))
for k, view in self.data.items()])
return sampled.table()
def reduce(self, dimensions=None, function=None, **reduce_map):
"""
Reduce each Element in the HoloMap using a function supplied
via the kwargs, where the keyword has to match a particular
dimension in the Elements.
"""
from ..element import Table
reduced_items = [(k, v.reduce(dimensions, function, **reduce_map))
for k, v in self.items()]
if not isinstance(reduced_items[0][1], Table):
params = dict(util.get_param_values(self.last),
kdims=self.kdims, vdims=self.last.vdims)
return Table(reduced_items, **params)
return self.clone(reduced_items).table()
def relabel(self, label=None, group=None, depth=1):
# Identical to standard relabel method except for default depth of 1
return super(HoloMap, self).relabel(label=label, group=group, depth=depth)
def hist(self, num_bins=20, bin_range=None, adjoin=True, individually=True, **kwargs):
histmaps = [self.clone(shared_data=False) for _ in
kwargs.get('dimension', range(1))]
if individually:
map_range = None
else:
if 'dimension' not in kwargs:
raise Exception("Please supply the dimension to compute a histogram for.")
map_range = self.range(kwargs['dimension'])
bin_range = map_range if bin_range is None else bin_range
style_prefix = 'Custom[<' + self.name + '>]_'
if issubclass(self.type, (NdOverlay, Overlay)) and 'index' not in kwargs:
kwargs['index'] = 0
for k, v in self.data.items():
hists = v.hist(adjoin=False, bin_range=bin_range,
individually=individually, num_bins=num_bins,
style_prefix=style_prefix, **kwargs)
if isinstance(hists, Layout):
for i, hist in enumerate(hists):
histmaps[i][k] = hist
else:
histmaps[0][k] = hists
if adjoin:
layout = self
for hist in histmaps:
layout = (layout << hist)
if issubclass(self.type, (NdOverlay, Overlay)):
layout.main_layer = kwargs['index']
return layout
else:
if len(histmaps) > 1:
return Layout.from_values(histmaps)
else:
return histmaps[0]
class Callable(param.Parameterized):
"""
Callable allows wrapping callbacks on one or more DynamicMaps
allowing their inputs (and in future outputs) to be defined.
This makes it possible to wrap DynamicMaps with streams and
makes it possible to traverse the graph of operations applied
to a DynamicMap.
"""
callable_function = param.Callable(default=lambda x: x, doc="""
The callable function being wrapped.""")
inputs = param.List(default=[], doc="""
The list of inputs the callable function is wrapping.""")
def __call__(self, *args, **kwargs):
return self.callable_function(*args, **kwargs)
def get_nested_streams(dmap):
"""
Get all (potentially nested) streams from DynamicMap with Callable
callback.
"""
layer_streams = list(dmap.streams)
if not isinstance(dmap.callback, Callable):
return list(set(layer_streams))
for o in dmap.callback.inputs:
if isinstance(o, DynamicMap):
layer_streams += get_nested_streams(o)
return list(set(layer_streams))
class DynamicMap(HoloMap):
"""
A DynamicMap is a type of HoloMap where the elements are dynamically
generated by a callback which may be either a callable or a
generator. A DynamicMap supports two different modes depending on
the type of callable supplied and the dimension declarations.
The 'bounded' mode is used when the limits of the parameter space
are known upon declaration (as specified by the ranges on the key
dimensions) or 'open' which allows the continual generation of
elements (e.g as data output by a simulator over an unbounded
simulated time dimension).
Generators always imply open mode but a callable that has any key
dimension unbounded in any direction will also be in open
mode. Bounded mode only applied to callables where all the key
dimensions are fully bounded.
"""
_sorted = False
# Declare that callback is a positional parameter (used in clone)
__pos_params = ['callback']
callback = param.Parameter(doc="""
The callable or generator used to generate the elements. In the
simplest case where all key dimensions are bounded, this can be
a callable that accepts the key dimension values as arguments
(in the declared order) and returns the corresponding element.
For open mode where there is an unbounded key dimension, the
return type can specify a key as well as element as the tuple
(key, element). If no key is supplied, a simple counter is used
instead.
If the callback is a generator, open mode is used and next() is
simply called. If the callback is callable and in open mode, the
element counter value will be supplied as the single
argument. This can be used to avoid issues where multiple
elements in a Layout each call next() leading to uncontrolled
changes in simulator state (the counter can be used to indicate
simulation time across the layout).
""")
streams = param.List(default=[], doc="""
List of Stream instances to associate with the DynamicMap. The
set of parameter values across these streams will be supplied as
keyword arguments to the callback when the events are received,
updating the streams.
Note that streams may only be used with callable callbacks (i.e
not generators).""" )
cache_size = param.Integer(default=500, doc="""
The number of entries to cache for fast access. This is an LRU
cache where the least recently used item is overwritten once
the cache is full.""")
cache_interval = param.Integer(default=1, doc="""
When the element counter modulo the cache_interval is zero, the
element will be cached and therefore accessible when casting to a
HoloMap. Applicable in open mode only.""")
sampled = param.Boolean(default=False, doc="""
Allows defining a DynamicMap in bounded mode without defining the
dimension bounds or values. The DynamicMap may then be explicitly
sampled via getitem or the sampling is determined during plotting
by a HoloMap with fixed sampling.
""")
def __init__(self, callback, initial_items=None, **params):
super(DynamicMap, self).__init__(initial_items, callback=callback, **params)
# Set source to self if not already specified
for stream in self.streams:
if stream.source is None:
stream.source = self
self.counter = 0
if self.callback is None:
raise Exception("A suitable callback must be "
"declared to create a DynamicMap")
self.call_mode = self._validate_mode()
self.mode = 'bounded' if self.call_mode == 'key' else 'open'
self._dimensionless_cache = False
def _initial_key(self):
"""
Construct an initial key for bounded mode based on the lower
range bounds or values on the key dimensions.
"""
key = []
for kdim in self.kdims:
if kdim.values:
key.append(kdim.values[0])
elif kdim.range:
key.append(kdim.range[0])
return tuple(key)
def _validate_mode(self):
"""
Check the key dimensions and callback to determine the calling mode.
"""
isgenerator = isinstance(self.callback, types.GeneratorType)
if isgenerator:
if self.sampled:
raise ValueError("Cannot set DynamicMap containing generator "
"to sampled")
return 'generator'
if self.sampled:
return 'key'
# Any unbounded kdim (any direction) implies open mode
for kdim in self.kdims:
if kdim.name in util.stream_parameters(self.streams):
return 'key'
if kdim.values:
continue
if None in kdim.range:
return 'counter'
return 'key'
def _validate_key(self, key):
"""
Make sure the supplied key values are within the bounds
specified by the corresponding dimension range and soft_range.
"""
key = util.wrap_tuple(key)
assert len(key) == len(self.kdims)
for ind, val in enumerate(key):
kdim = self.kdims[ind]
low, high = util.max_range([kdim.range, kdim.soft_range])
if low is not np.NaN:
if val < low:
raise StopIteration("Key value %s below lower bound %s"
% (val, low))
if high is not np.NaN:
if val > high:
raise StopIteration("Key value %s above upper bound %s"
% (val, high))
def event(self, trigger=True, **kwargs):
"""
This method allows any of the available stream parameters to be
updated in an event.
"""
stream_params = set(util.stream_parameters(self.streams))
updated_streams = []
for stream in self.streams:
overlap = set(stream.params().keys()) & stream_params & set(kwargs.keys())
if overlap:
stream.update(**dict({k:kwargs[k] for k in overlap}, trigger=False))
updated_streams.append(stream)
if updated_streams and trigger:
updated_streams[0].trigger(updated_streams)
def _style(self, retval):
"""
Use any applicable OptionTree of the DynamicMap to apply options
to the return values of the callback.
"""
if self.id not in Store.custom_options():
return retval
spec = StoreOptions.tree_to_dict(Store.custom_options()[self.id])
return retval(spec)
def _execute_callback(self, *args):
"""
Execute the callback, validating both the input key and output
key where applicable.
"""
if self.call_mode == 'key':
self._validate_key(args) # Validate input key
if self.call_mode == 'generator':
retval = next(self.callback)
else:
# Additional validation needed to ensure kwargs don't clash
kdims = [kdim.name for kdim in self.kdims]
kwarg_items = [s.contents.items() for s in self.streams]
flattened = [(k,v) for kws in kwarg_items for (k,v) in kws
if k not in kdims]
retval = self.callback(*args, **dict(flattened))
if self.call_mode=='key':
return self._style(retval)
if isinstance(retval, tuple):
self._validate_key(retval[0]) # Validated output key
return (retval[0], self._style(retval[1]))
else:
self._validate_key((self.counter,))
return (self.counter, self._style(retval))
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
"""
Clone method to adapt the slightly different signature of
DynamicMap that also overrides Dimensioned clone to avoid
checking items if data is unchanged.
"""
if data is None and shared_data:
data = self.data
return super(UniformNdMapping, self).clone(overrides.pop('callback', self.callback),
shared_data, new_type,
*(data,) + args, **overrides)
def reset(self):
"""
Return a cleared dynamic map with a cleared cached
and a reset counter.
"""
if self.call_mode == 'generator':
raise Exception("Cannot reset generators.")
self.counter = 0
self.data = OrderedDict()
return self
def _cross_product(self, tuple_key, cache, data_slice):
"""
Returns a new DynamicMap if the key (tuple form) expresses a
cross product, otherwise returns None. The cache argument is a
dictionary (key:element pairs) of all the data found in the
cache for this key.
Each key inside the cross product is looked up in the cache
(self.data) to check if the appropriate element is
available. Otherwise the element is computed accordingly.
The data_slice may specify slices into each value in the
the cross-product.
"""
if self.mode != 'bounded': return None
if not any(isinstance(el, (list, set)) for el in tuple_key):
return None
if len(tuple_key)==1:
product = tuple_key[0]
else:
args = [set(el) if isinstance(el, (list,set))
else set([el]) for el in tuple_key]
product = itertools.product(*args)
data = []
for inner_key in product:
key = util.wrap_tuple(inner_key)
if key in cache:
val = cache[key]
else:
val = self._execute_callback(*key)
if data_slice:
val = self._dataslice(val, data_slice)
data.append((key, val))
product = self.clone(data)
if data_slice:
from ..util import Dynamic
return Dynamic(product, operation=lambda obj: obj[data_slice],
shared_data=True)
return product
def _slice_bounded(self, tuple_key, data_slice):
"""
Slices bounded DynamicMaps by setting the soft_ranges on
key dimensions and applies data slice to cached and dynamic
values.
"""
slices = [el for el in tuple_key if isinstance(el, slice)]
if any(el.step for el in slices):
raise Exception("Slices cannot have a step argument "
"in DynamicMap bounded mode ")
elif len(slices) not in [0, len(tuple_key)]:
raise Exception("Slices must be used exclusively or not at all")
elif not slices:
return None
sliced = self.clone(self)
for i, slc in enumerate(tuple_key):
(start, stop) = slc.start, slc.stop
if start is not None and start < sliced.kdims[i].range[0]:
raise Exception("Requested slice below defined dimension range.")
if stop is not None and stop > sliced.kdims[i].range[1]:
raise Exception("Requested slice above defined dimension range.")
sliced.kdims[i].soft_range = (start, stop)
if data_slice:
if not isinstance(sliced, DynamicMap):
return self._dataslice(sliced, data_slice)
else:
from ..util import Dynamic
if len(self):
slices = [slice(None) for _ in range(self.ndims)] + list(data_slice)
sliced = super(DynamicMap, sliced).__getitem__(tuple(slices))
return Dynamic(sliced, operation=lambda obj: obj[data_slice],
shared_data=True)
return sliced
def __getitem__(self, key):
"""
Return an element for any key chosen key (in'bounded mode') or
for a previously generated key that is still in the cache
(for one of the 'open' modes). Also allows for usual deep
slicing semantics by slicing values in the cache and applying
the deep slice to newly generated values.
"""
# Split key dimensions and data slices
sample = False
if key is Ellipsis:
return self
elif isinstance(key, (list, set)) and all(isinstance(v, tuple) for v in key):
map_slice, data_slice = key, ()
sample = True
else:
map_slice, data_slice = self._split_index(key)
tuple_key = util.wrap_tuple_streams(map_slice, self.kdims, self.streams)
# Validation for bounded mode
if self.mode == 'bounded' and not sample:
sliced = self._slice_bounded(tuple_key, data_slice)
if sliced is not None:
return sliced
# Cache lookup
try:
dimensionless = util.dimensionless_contents(get_nested_streams(self),
self.kdims, no_duplicates=False)
if (dimensionless and not self._dimensionless_cache):
raise KeyError('Using dimensionless streams disables DynamicMap cache')
cache = super(DynamicMap,self).__getitem__(key)
# Return selected cache items in a new DynamicMap
if isinstance(cache, DynamicMap) and self.mode=='open':
cache = self.clone(cache)
except KeyError as e:
cache = None
if self.mode == 'open' and len(self.data)>0:
raise KeyError(str(e) + " Note: Cannot index outside "
"available cache in open interval mode.")
# If the key expresses a cross product, compute the elements and return
product = self._cross_product(tuple_key, cache.data if cache else {}, data_slice)
if product is not None:
return product
# Not a cross product and nothing cached so compute element.
if cache is not None: return cache
val = self._execute_callback(*tuple_key)
if self.call_mode == 'counter':
val = val[1]
if data_slice:
val = self._dataslice(val, data_slice)
self._cache(tuple_key, val)
return val
def select(self, selection_specs=None, **kwargs):
"""
Allows slicing or indexing into the DynamicMap objects by
supplying the dimension and index/slice as key value
pairs. Select descends recursively through the data structure
applying the key dimension selection and applies to dynamically
generated items by wrapping the callback.
The selection may also be selectively applied to specific
objects by supplying the selection_specs as an iterable of
type.group.label specs, types or functions.
"""
if selection_specs is not None and not isinstance(selection_specs, (list, tuple)):
selection_specs = [selection_specs]
selection = super(DynamicMap, self).select(selection_specs, **kwargs)
def dynamic_select(obj):
if selection_specs is not None:
matches = any(obj.matches(spec) for spec in selection_specs)
else:
matches = True
if matches:
return obj.select(**kwargs)
return obj
if not isinstance(selection, DynamicMap):
return dynamic_select(selection)
else:
from ..util import Dynamic
return Dynamic(selection, operation=dynamic_select,
shared_data=True)
def _cache(self, key, val):
"""
Request that a key/value pair be considered for caching.
"""
cache_size = (1 if util.dimensionless_contents(self.streams, self.kdims)
else self.cache_size)
if self.mode == 'open' and (self.counter % self.cache_interval)!=0:
return
if len(self) >= cache_size:
first_key = next(k for k in self.data)
self.data.pop(first_key)
self.data[key] = val
def next(self):
"""
Interface for 'open' mode. For generators, this simply calls the
next() method. For callables callback, the counter is supplied
as a single argument.
"""
if self.mode == 'bounded':
raise Exception("The next() method should only be called in "
"one of the open modes.")
args = () if self.call_mode == 'generator' else (self.counter,)
retval = self._execute_callback(*args)
(key, val) = (retval if isinstance(retval, tuple)
else (self.counter, retval))
key = util.wrap_tuple_streams(key, self.kdims, self.streams)
if len(key) != len(self.key_dimensions):
raise Exception("Generated key does not match the number of key dimensions")
self._cache(key, val)
self.counter += 1
return val
def relabel(self, label=None, group=None, depth=1):
"""
Assign a new label and/or group to an existing LabelledData
object, creating a clone of the object with the new settings.
"""
relabelled = super(DynamicMap, self).relabel(label, group, depth)
if depth > 0:
from ..util import Dynamic
def dynamic_relabel(obj):
return obj.relabel(group=group, label=label, depth=depth-1)
return Dynamic(relabelled, shared_data=True, operation=dynamic_relabel)
return relabelled
def redim(self, specs=None, **dimensions):
"""
Replaces existing dimensions in an object with new dimensions
or changing specific attributes of a dimensions. Dimension
mapping should map between the old dimension name and a
dictionary of the new attributes, a completely new dimension
or a new string name.
"""
redimmed = super(DynamicMap, self).redim(specs, **dimensions)
from ..util import Dynamic
def dynamic_redim(obj):
return obj.redim(specs, **dimensions)
return Dynamic(redimmed, shared_data=True, operation=dynamic_redim)
def groupby(self, dimensions=None, container_type=None, group_type=None, **kwargs):
"""
Implements a dynamic version of a groupby, which will
intelligently expand either the inner or outer dimensions
depending on whether the container_type or group_type is dynamic.
To apply a groupby to a DynamicMap the dimensions, which are
expanded into a non-dynamic type must define a fixed sampling
via the values attribute.
Using the dynamic groupby makes it incredibly easy to generate
dynamic views into a high-dimensional space while taking
advantage of the capabilities of NdOverlay, GridSpace and
NdLayout types to visualize more than one Element at a time.
"""
if dimensions is None:
dimensions = self.kdims
if not isinstance(dimensions, (list, tuple)):
dimensions = [dimensions]
container_type = container_type if container_type else type(self)
group_type = group_type if group_type else type(self)
outer_kdims = [self.get_dimension(d) for d in dimensions]
inner_kdims = [d for d in self.kdims if not d in outer_kdims]
outer_dynamic = issubclass(container_type, DynamicMap)
inner_dynamic = issubclass(group_type, DynamicMap)
if ((not outer_dynamic and any(not d.values for d in outer_kdims)) or
(not inner_dynamic and any(not d.values for d in inner_kdims))):
raise Exception('Dimensions must specify sampling via '
'values to apply a groupby')
if outer_dynamic:
def outer_fn(*outer_key):
if inner_dynamic:
def inner_fn(*inner_key):
outer_vals = zip(outer_kdims, util.wrap_tuple(outer_key))
inner_vals = zip(inner_kdims, util.wrap_tuple(inner_key))
inner_sel = [(k.name, v) for k, v in inner_vals]
outer_sel = [(k.name, v) for k, v in outer_vals]
return self.select(**dict(inner_sel+outer_sel))
return self.clone([], callback=inner_fn, kdims=inner_kdims)
else:
dim_vals = [(d.name, d.values) for d in inner_kdims]
dim_vals += [(d.name, [v]) for d, v in
zip(outer_kdims, util.wrap_tuple(outer_key))]
return group_type(self.select(**dict(dim_vals))).reindex(inner_kdims)
if outer_kdims:
return self.clone([], callback=outer_fn, kdims=outer_kdims)
else:
return outer_fn(())
else:
outer_product = itertools.product(*[self.get_dimension(d).values
for d in dimensions])
groups = []
for outer in outer_product:
outer_vals = [(d.name, [o]) for d, o in zip(outer_kdims, outer)]
if inner_dynamic or not inner_kdims:
def inner_fn(outer_vals, *key):
inner_dims = zip(inner_kdims, util.wrap_tuple(key))
inner_vals = [(d.name, k) for d, k in inner_dims]
return self.select(**dict(outer_vals+inner_vals)).last
if inner_kdims:
group = self.clone(callback=partial(inner_fn, outer_vals),
kdims=inner_kdims)
else:
group = inner_fn(outer_vals, ())
groups.append((outer, group))
else:
inner_vals = [(d.name, self.get_dimension(d).values)
for d in inner_kdims]
group = group_type(self.select(**dict(outer_vals+inner_vals)).reindex(inner_kdims))
groups.append((outer, group))
return container_type(groups, kdims=outer_kdims)
def grid(self, dimensions=None, **kwargs):
return self.groupby(dimensions, container_type=GridSpace, **kwargs)
def layout(self, dimensions=None, **kwargs):
return self.groupby(dimensions, container_type=NdLayout, **kwargs)
def overlay(self, dimensions=None, **kwargs):
if dimensions is None:
dimensions = self.kdims
if not isinstance(dimensions, (list, tuple)):
dimensions = [dimensions]
dimensions = [self.get_dimension(d) for d in dimensions]
dims = [d for d in self.kdims if d not in dimensions]
return self.groupby(dims, group_type=NdOverlay)
# For Python 2 and 3 compatibility
__next__ = next
class GridSpace(UniformNdMapping):
"""
Grids are distinct from Layouts as they ensure all contained
elements to be of the same type. Unlike Layouts, which have
integer keys, Grids usually have floating point keys, which
correspond to a grid sampling in some two-dimensional space. This
two-dimensional space may have to arbitrary dimensions, e.g. for
2D parameter spaces.
"""
kdims = param.List(default=[Dimension(name="X"), Dimension(name="Y")],
bounds=(1,2))
def __init__(self, initial_items=None, **params):
super(GridSpace, self).__init__(initial_items, **params)
if self.ndims > 2:
raise Exception('Grids can have no more than two dimensions.')
def __mul__(self, other):
if isinstance(other, GridSpace):
if set(self.keys()) != set(other.keys()):
raise KeyError("Can only overlay two ParameterGrids if their keys match")
zipped = zip(self.keys(), self.values(), other.values())
overlayed_items = [(k, el1 * el2) for (k, el1, el2) in zipped]
return self.clone(overlayed_items)
elif isinstance(other, UniformNdMapping) and len(other) == 1:
view = other.last
elif isinstance(other, UniformNdMapping) and len(other) != 1:
raise Exception("Can only overlay with HoloMap of length 1")
else:
view = other
overlayed_items = [(k, el * view) for k, el in self.items()]
return self.clone(overlayed_items)
def __lshift__(self, other):
if isinstance(other, (ViewableElement, UniformNdMapping)):
return AdjointLayout([self, other])
elif isinstance(other, AdjointLayout):
return AdjointLayout(other.data+[self])
else:
raise TypeError('Cannot append {0} to a AdjointLayout'.format(type(other).__name__))
def _transform_indices(self, key):
"""
Transforms indices by snapping to the closest value if
values are numeric, otherwise applies no transformation.
"""
ndims = self.ndims
if all(not (isinstance(el, slice) or callable(el)) for el in key):
dim_inds = []
for dim in self.kdims:
dim_type = self.get_dimension_type(dim)
if isinstance(dim_type, type) and issubclass(dim_type, Number):
dim_inds.append(self.get_dimension_index(dim))
str_keys = iter(key[i] for i in range(self.ndims)
if i not in dim_inds)
num_keys = []
if len(dim_inds):
keys = list({tuple(k[i] if ndims > 1 else k for i in dim_inds)
for k in self.keys()})
q = np.array([tuple(key[i] if ndims > 1 else key for i in dim_inds)])
idx = np.argmin([np.inner(q - np.array(x), q - np.array(x))
if len(dim_inds) == 2 else np.abs(q-x)
for x in keys])
num_keys = iter(keys[idx])
key = tuple(next(num_keys) if i in dim_inds else next(str_keys)
for i in range(self.ndims))
elif any(not (isinstance(el, slice) or callable(el)) for el in key):
index_inds = [idx for idx, el in enumerate(key)
if not isinstance(el, (slice, str))]
if len(index_inds):
index_ind = index_inds[0]
dim_keys = np.array([k[index_ind] for k in self.keys()])
snapped_val = dim_keys[np.argmin(np.abs(dim_keys-key[index_ind]))]
key = list(key)
key[index_ind] = snapped_val
key = tuple(key)
return key
def keys(self, full_grid=False):
"""
Returns a complete set of keys on a GridSpace, even when GridSpace isn't fully
populated. This makes it easier to identify missing elements in the
GridSpace.
"""
keys = super(GridSpace, self).keys()
if self.ndims == 1 or not full_grid:
return keys
dim1_keys = sorted(set(k[0] for k in keys))
dim2_keys = sorted(set(k[1] for k in keys))
return [(d1, d2) for d1 in dim1_keys for d2 in dim2_keys]
@property
def last(self):
"""
The last of a GridSpace is another GridSpace
constituted of the last of the individual elements. To access
the elements by their X,Y position, either index the position
directly or use the items() method.
"""
if self.type == HoloMap:
last_items = [(k, v.last if isinstance(v, HoloMap) else v)
for (k, v) in self.data.items()]
else:
last_items = self.data
return self.clone(last_items)
def __len__(self):
"""
The maximum depth of all the elements. Matches the semantics
of __len__ used by Maps. For the total number of elements,
count the full set of keys.
"""
return max([(len(v) if hasattr(v, '__len__') else 1) for v in self.values()] + [0])
def __add__(self, obj):
return Layout.from_values(self) + Layout.from_values(obj)
@property
def shape(self):
keys = self.keys()
if self.ndims == 1:
return (len(keys), 1)
return len(set(k[0] for k in keys)), len(set(k[1] for k in keys))
class GridMatrix(GridSpace):
"""
GridMatrix is container type for heterogeneous Element types
laid out in a grid. Unlike a GridSpace the axes of the Grid
must not represent an actual coordinate space, but may be used
to plot various dimensions against each other. The GridMatrix
is usually constructed using the gridmatrix operation, which
will generate a GridMatrix plotting each dimension in an
Element against each other.
"""
def _item_check(self, dim_vals, data):
if not traversal.uniform(NdMapping([(0, self), (1, data)])):
raise ValueError("HoloMaps dimensions must be consistent in %s." %
type(self).__name__)
NdMapping._item_check(self, dim_vals, data)
| 1 | 16,230 | I think either 'avoiding calls to the function' or 'to avoid calling the function ...' would be read better. | holoviz-holoviews | py |
@@ -5425,7 +5425,7 @@ SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd, const SyncValidator &
// TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer copy
// Note that this a safe to presist as long as shared_attachments is not cleared
attachments_.reserve(shared_attachments_.size());
- for (const auto attachment : shared_attachments_) {
+ for (const auto &attachment : shared_attachments_) {
attachments_.emplace_back(attachment.get());
}
} | 1 | /* Copyright (c) 2019-2021 The Khronos Group Inc.
* Copyright (c) 2019-2021 Valve Corporation
* Copyright (c) 2019-2021 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: John Zulauf <[email protected]>
* Author: Locke Lin <[email protected]>
* Author: Jeremy Gebben <[email protected]>
*/
#include <limits>
#include <vector>
#include <memory>
#include <bitset>
#include "synchronization_validation.h"
#include "sync_utils.h"
static bool SimpleBinding(const BINDABLE &bindable) { return !bindable.sparse && bindable.binding.mem_state; }
const static std::array<AccessAddressType, static_cast<size_t>(AccessAddressType::kTypeCount)> kAddressTypes = {
AccessAddressType::kLinear, AccessAddressType::kIdealized};
static constexpr AccessAddressType GetAccessAddressType(const BUFFER_STATE &) { return AccessAddressType::kLinear; }
static AccessAddressType GetAccessAddressType(const IMAGE_STATE &image) {
return SimpleBinding(image) ? AccessContext::ImageAddressType(image) : AccessAddressType::kIdealized;
}
static const char *string_SyncHazardVUID(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "SYNC-HAZARD-NONE";
        case SyncHazard::READ_AFTER_WRITE:
            return "SYNC-HAZARD-READ_AFTER_WRITE";
        case SyncHazard::WRITE_AFTER_READ:
            return "SYNC-HAZARD-WRITE_AFTER_READ";
        case SyncHazard::WRITE_AFTER_WRITE:
            return "SYNC-HAZARD-WRITE_AFTER_WRITE";
        case SyncHazard::READ_RACING_WRITE:
            return "SYNC-HAZARD-READ-RACING-WRITE";
        case SyncHazard::WRITE_RACING_WRITE:
            return "SYNC-HAZARD-WRITE-RACING-WRITE";
        case SyncHazard::WRITE_RACING_READ:
            return "SYNC-HAZARD-WRITE-RACING-READ";
        default:
            assert(0);
    }
    return "SYNC-HAZARD-INVALID";
}
static bool IsHazardVsRead(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return false;
        case SyncHazard::READ_AFTER_WRITE:
            return false;
        case SyncHazard::WRITE_AFTER_READ:
            return true;
        case SyncHazard::WRITE_AFTER_WRITE:
            return false;
        case SyncHazard::READ_RACING_WRITE:
            return false;
        case SyncHazard::WRITE_RACING_WRITE:
            return false;
        case SyncHazard::WRITE_RACING_READ:
            return true;
        default:
            assert(0);
    }
    return false;
}
static const char *string_SyncHazard(SyncHazard hazard) {
    switch (hazard) {
        case SyncHazard::NONE:
            return "NONE";
        case SyncHazard::READ_AFTER_WRITE:
            return "READ_AFTER_WRITE";
        case SyncHazard::WRITE_AFTER_READ:
            return "WRITE_AFTER_READ";
        case SyncHazard::WRITE_AFTER_WRITE:
            return "WRITE_AFTER_WRITE";
        case SyncHazard::READ_RACING_WRITE:
            return "READ_RACING_WRITE";
        case SyncHazard::WRITE_RACING_WRITE:
            return "WRITE_RACING_WRITE";
        case SyncHazard::WRITE_RACING_READ:
            return "WRITE_RACING_READ";
        default:
            assert(0);
    }
    return "INVALID HAZARD";
}
static const SyncStageAccessInfoType *SyncStageAccessInfoFromMask(SyncStageAccessFlags flags) {
// Return the info for the first bit found
const SyncStageAccessInfoType *info = nullptr;
for (size_t i = 0; i < flags.size(); i++) {
if (flags.test(i)) {
info = &syncStageAccessInfoByStageAccessIndex[i];
break;
}
}
return info;
}
static std::string string_SyncStageAccessFlags(const SyncStageAccessFlags &flags, const char *sep = "|") {
std::string out_str;
if (flags.none()) {
out_str = "0";
} else {
for (size_t i = 0; i < syncStageAccessInfoByStageAccessIndex.size(); i++) {
const auto &info = syncStageAccessInfoByStageAccessIndex[i];
if ((flags & info.stage_access_bit).any()) {
if (!out_str.empty()) {
out_str.append(sep);
}
out_str.append(info.name);
}
}
if (out_str.length() == 0) {
out_str.append("Unhandled SyncStageAccess");
}
}
return out_str;
}
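// Illustrative note on the formatter above (the exact names come from syncStageAccessInfoByStageAccessIndex,
// so the strings here are an assumption): a mask with two bits set would format as something like
//   "SYNC_VERTEX_SHADER_UNIFORM_READ|SYNC_FRAGMENT_SHADER_UNIFORM_READ"
// and passing sep = ", " would swap the '|' for a comma-separated list.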
static std::string string_UsageTag(const ResourceUsageTag &tag) {
std::stringstream out;
out << "command: " << CommandTypeString(tag.command);
out << ", seq_no: " << tag.seq_num;
if (tag.sub_command != 0) {
out << ", subcmd: " << tag.sub_command;
}
return out.str();
}
std::string CommandBufferAccessContext::FormatUsage(const HazardResult &hazard) const {
const auto &tag = hazard.tag;
assert(hazard.usage_index < static_cast<SyncStageAccessIndex>(syncStageAccessInfoByStageAccessIndex.size()));
const auto &usage_info = syncStageAccessInfoByStageAccessIndex[hazard.usage_index];
std::stringstream out;
const auto *info = SyncStageAccessInfoFromMask(hazard.prior_access);
const char *stage_access_name = info ? info->name : "INVALID_STAGE_ACCESS";
out << "(usage: " << usage_info.name << ", prior_usage: " << stage_access_name;
if (IsHazardVsRead(hazard.hazard)) {
const auto barriers = hazard.access_state->GetReadBarriers(hazard.prior_access);
out << ", read_barriers: " << string_VkPipelineStageFlags(barriers);
} else {
SyncStageAccessFlags write_barrier = hazard.access_state->GetWriteBarriers();
out << ", write_barriers: " << string_SyncStageAccessFlags(write_barrier);
}
    // PHASE2 TODO -- add command buffer and reset from secondary if applicable
out << ", " << string_UsageTag(tag) << ", reset_no: " << reset_count_;
return out.str();
}
// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
// rules apply only to this specific access for this stage, and not the stage as a whole. The ordering detection
// also reflects this special case for read hazard detection (using access instead of exec scope)
static constexpr VkPipelineStageFlags kColorAttachmentExecScope = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
static const SyncStageAccessFlags kColorAttachmentAccessScope =
SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT |
SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT |
SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT; // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags kDepthStencilAttachmentExecScope =
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
static const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT; // Note: this is intentionally not in the exec scope
static constexpr VkPipelineStageFlags kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
static const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;
ResourceAccessState::OrderingBarriers ResourceAccessState::kOrderingRules = {
{{0U, SyncStageAccessFlags()},
{kColorAttachmentExecScope, kColorAttachmentAccessScope},
{kDepthStencilAttachmentExecScope, kDepthStencilAttachmentAccessScope},
{kRasterAttachmentExecScope, kRasterAttachmentAccessScope}}};
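// A sketch of the intent above (based on the scopes defined immediately before; the indexing scheme is an
// assumption): the four rules are, in order, the "no ordering guarantees" rule, the color attachment rule,
// the depth/stencil attachment rule, and the combined raster rule, and hazard checks pick one of them
// according to the ordering class of the access being validated.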
// Sometimes we have an internal access conflict, and we use kCurrentCommandTag to set and detect it in temporary/proxy contexts
static const ResourceUsageTag kCurrentCommandTag(ResourceUsageTag::kMaxIndex, ResourceUsageTag::kMaxCount,
ResourceUsageTag::kMaxCount, CMD_NONE);
static VkDeviceSize ResourceBaseAddress(const BINDABLE &bindable) {
return bindable.binding.offset + bindable.binding.mem_state->fake_base_address;
}
inline VkDeviceSize GetRealWholeSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
if (size == VK_WHOLE_SIZE) {
return (whole_size - offset);
}
return size;
}
static inline VkDeviceSize GetBufferWholeSize(const BUFFER_STATE &buf_state, VkDeviceSize offset, VkDeviceSize size) {
return GetRealWholeSize(offset, size, buf_state.createInfo.size);
}
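// Build a half-open ResourceAccessRange [offset, offset + size) from any type exposing offset and size members.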
template <typename T>
static ResourceAccessRange MakeRange(const T &has_offset_and_size) {
return ResourceAccessRange(has_offset_and_size.offset, (has_offset_and_size.offset + has_offset_and_size.size));
}
static ResourceAccessRange MakeRange(VkDeviceSize start, VkDeviceSize size) { return ResourceAccessRange(start, (start + size)); }
static inline ResourceAccessRange MakeRange(const BUFFER_STATE &buffer, VkDeviceSize offset, VkDeviceSize size) {
return MakeRange(offset, GetBufferWholeSize(buffer, offset, size));
}
static inline ResourceAccessRange MakeRange(const BUFFER_VIEW_STATE &buf_view_state) {
return MakeRange(*buf_view_state.buffer_state.get(), buf_view_state.create_info.offset, buf_view_state.create_info.range);
}
// Range generators to allow event scope filtration to be limited to the top of the resource access traversal pipeline
//
// Note: there is no "begin/end" or reset facility. These are each written as "one time through" generators.
//
// Usage:
// Constructor() -- initializes the generator to point to the begin of the space declared.
// * -- an empty current range signifies end
// ++ -- advance to the next non-empty range (or end)
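//
// Example (ProcessRange is a hypothetical caller-supplied function, for illustration only):
//   EventSimpleRangeGenerator gen(scope_map, range);
//   for (; gen->non_empty(); ++gen) ProcessRange(*gen);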
// A wrapper for a single range with the same semantics as the actual generators below
template <typename KeyType>
class SingleRangeGenerator {
public:
SingleRangeGenerator(const KeyType &range) : current_(range) {}
const KeyType &operator*() const { return current_; }
const KeyType *operator->() const { return &current_; }
SingleRangeGenerator &operator++() {
current_ = KeyType(); // just one real range
return *this;
}
bool operator==(const SingleRangeGenerator &other) const { return current_ == other.current_; }
private:
SingleRangeGenerator() = default;
const KeyType range_;
KeyType current_;
};
// Generate the ranges that are the intersection of range and the entries in the FilterMap
template <typename FilterMap, typename KeyType = typename FilterMap::key_type>
class FilteredRangeGenerator {
public:
// A default-constructed generator is safe to dereference for the "empty" test, but for no other operation.
FilteredRangeGenerator() : range_(), filter_(nullptr), filter_pos_(), current_() {
// Default construction for KeyType *must* be empty range
assert(current_.empty());
}
FilteredRangeGenerator(const FilterMap &filter, const KeyType &range)
: range_(range), filter_(&filter), filter_pos_(), current_() {
SeekBegin();
}
FilteredRangeGenerator(const FilteredRangeGenerator &from) = default;
const KeyType &operator*() const { return current_; }
const KeyType *operator->() const { return &current_; }
FilteredRangeGenerator &operator++() {
++filter_pos_;
UpdateCurrent();
return *this;
}
bool operator==(const FilteredRangeGenerator &other) const { return current_ == other.current_; }
private:
void UpdateCurrent() {
if (filter_pos_ != filter_->cend()) {
current_ = range_ & filter_pos_->first;
} else {
current_ = KeyType();
}
}
void SeekBegin() {
filter_pos_ = filter_->lower_bound(range_);
UpdateCurrent();
}
const KeyType range_;
const FilterMap *filter_;
typename FilterMap::const_iterator filter_pos_;
KeyType current_;
};
using SingleAccessRangeGenerator = SingleRangeGenerator<ResourceAccessRange>;
using EventSimpleRangeGenerator = FilteredRangeGenerator<SyncEventState::ScopeMap>;
// Templated to allow for different Range generators or map sources...
// Generate the ranges that are the intersection of the RangeGen ranges and the entries in the FilterMap
template <typename FilterMap, typename RangeGen, typename KeyType = typename FilterMap::key_type>
class FilteredGeneratorGenerator {
public:
// A default-constructed generator is safe to dereference for the "empty" test, but for no other operation.
FilteredGeneratorGenerator() : filter_(nullptr), gen_(), filter_pos_(), current_() {
// Default construction for KeyType *must* be empty range
assert(current_.empty());
}
FilteredGeneratorGenerator(const FilterMap &filter, RangeGen &gen) : filter_(&filter), gen_(gen), filter_pos_(), current_() {
SeekBegin();
}
FilteredGeneratorGenerator(const FilteredGeneratorGenerator &from) = default;
const KeyType &operator*() const { return current_; }
const KeyType *operator->() const { return &current_; }
FilteredGeneratorGenerator &operator++() {
KeyType gen_range = GenRange();
KeyType filter_range = FilterRange();
current_ = KeyType();
while (gen_range.non_empty() && filter_range.non_empty() && current_.empty()) {
if (gen_range.end > filter_range.end) {
// if the generated range is beyond the filter_range, advance the filter range
filter_range = AdvanceFilter();
} else {
gen_range = AdvanceGen();
}
current_ = gen_range & filter_range;
}
return *this;
}
bool operator==(const FilteredGeneratorGenerator &other) const { return current_ == other.current_; }
private:
KeyType AdvanceFilter() {
++filter_pos_;
auto filter_range = FilterRange();
if (filter_range.valid()) {
FastForwardGen(filter_range);
}
return filter_range;
}
KeyType AdvanceGen() {
++gen_;
auto gen_range = GenRange();
if (gen_range.valid()) {
FastForwardFilter(gen_range);
}
return gen_range;
}
KeyType FilterRange() const { return (filter_pos_ != filter_->cend()) ? filter_pos_->first : KeyType(); }
KeyType GenRange() const { return *gen_; }
KeyType FastForwardFilter(const KeyType &range) {
auto filter_range = FilterRange();
int retry_count = 0;
const static int kRetryLimit = 2; // TODO -- determine whether this limit is optimal
while (!filter_range.empty() && (filter_range.end <= range.begin)) {
if (retry_count < kRetryLimit) {
++filter_pos_;
filter_range = FilterRange();
retry_count++;
} else {
// Okay we've tried walking, do a seek.
filter_pos_ = filter_->lower_bound(range);
break;
}
}
return FilterRange();
}
// TODO: Consider adding "seek" (or an absolute bound "get") to range generators to make this walk faster.
KeyType FastForwardGen(const KeyType &range) {
auto gen_range = GenRange();
while (!gen_range.empty() && (gen_range.end <= range.begin)) {
++gen_;
gen_range = GenRange();
}
return gen_range;
}
void SeekBegin() {
auto gen_range = GenRange();
if (gen_range.empty()) {
current_ = KeyType();
filter_pos_ = filter_->cend();
} else {
filter_pos_ = filter_->lower_bound(gen_range);
current_ = gen_range & FilterRange();
}
}
const FilterMap *filter_;
RangeGen gen_;
typename FilterMap::const_iterator filter_pos_;
KeyType current_;
};
using EventImageRangeGenerator = FilteredGeneratorGenerator<SyncEventState::ScopeMap, subresource_adapter::ImageRangeGenerator>;
static const ResourceAccessRange kFullRange(std::numeric_limits<VkDeviceSize>::min(), std::numeric_limits<VkDeviceSize>::max());
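// Compute the byte range covered by count strided elements starting at first_index; count == UINT32_MAX
// means "through the end of the buffer".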
ResourceAccessRange GetBufferRange(VkDeviceSize offset, VkDeviceSize buf_whole_size, uint32_t first_index, uint32_t count,
VkDeviceSize stride) {
VkDeviceSize range_start = offset + first_index * stride;
VkDeviceSize range_size = 0;
if (count == UINT32_MAX) {
range_size = buf_whole_size - range_start;
} else {
range_size = count * stride;
}
return MakeRange(range_start, range_size);
}
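// Map a descriptor type and shader stage to the stage/access index used for hazard detection.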
SyncStageAccessIndex GetSyncStageAccessIndexsByDescriptorSet(VkDescriptorType descriptor_type, const interface_var &descriptor_data,
VkShaderStageFlagBits stage_flag) {
if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
assert(stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT);
return SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ;
}
auto stage_access = syncStageAccessMaskByShaderStage.find(stage_flag);
if (stage_access == syncStageAccessMaskByShaderStage.end()) {
assert(0);
}
if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
return stage_access->second.uniform_read;
}
// If the descriptorSet is writable, we don't need to care about SHADER_READ. SHADER_WRITE is enough.
// Because if a write hazard happens, a read hazard might or might not happen.
// But if a write hazard doesn't happen, a read hazard cannot happen.
if (descriptor_data.is_writable) {
return stage_access->second.shader_write;
}
return stage_access->second.shader_read;
}
bool IsImageLayoutDepthWritable(VkImageLayout image_layout) {
return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
image_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL);
}
bool IsImageLayoutStencilWritable(VkImageLayout image_layout) {
return (image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
image_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
image_layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL);
}
// Class AccessContext stores the state of accesses specific to a Command, Subpass, or Queue
template <typename Action>
static void ApplyOverImageRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range_arg,
Action &action) {
// At this point the "apply over range" logic only supports a single memory binding
if (!SimpleBinding(image_state)) return;
auto subresource_range = NormalizeSubresourceRange(image_state.createInfo, subresource_range_arg);
const auto base_address = ResourceBaseAddress(image_state);
subresource_adapter::ImageRangeGenerator range_gen(*image_state.fragment_encoder.get(), subresource_range, {0, 0, 0},
image_state.createInfo.extent, base_address);
for (; range_gen->non_empty(); ++range_gen) {
action(*range_gen);
}
}
// Traverse the attachment resolves for a specific subpass, and do action() on them.
// Used by both validation and record operations
//
// The signature for Action() reflects the needs of both uses.
template <typename Action>
void ResolveOperation(Action &action, const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass) {
VkExtent3D extent = CastTo3D(render_area.extent);
VkOffset3D offset = CastTo3D(render_area.offset);
const auto &rp_ci = rp_state.createInfo;
const auto *attachment_ci = rp_ci.pAttachments;
const auto &subpass_ci = rp_ci.pSubpasses[subpass];
// Color resolves -- require an in-use color attachment and a matching in-use resolve attachment
const auto *color_attachments = subpass_ci.pColorAttachments;
const auto *color_resolve = subpass_ci.pResolveAttachments;
if (color_resolve && color_attachments) {
for (uint32_t i = 0; i < subpass_ci.colorAttachmentCount; i++) {
const auto &color_attach = color_attachments[i].attachment;
const auto &resolve_attach = subpass_ci.pResolveAttachments[i].attachment;
if ((color_attach != VK_ATTACHMENT_UNUSED) && (resolve_attach != VK_ATTACHMENT_UNUSED)) {
action("color", "resolve read", color_attach, resolve_attach, attachment_views[color_attach],
SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kColorAttachment, offset, extent, 0);
action("color", "resolve write", color_attach, resolve_attach, attachment_views[resolve_attach],
SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kColorAttachment, offset, extent, 0);
}
}
}
// Depth stencil resolve only if the extension is present
const auto ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_ci.pNext);
if (ds_resolve && ds_resolve->pDepthStencilResolveAttachment &&
(ds_resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED) && subpass_ci.pDepthStencilAttachment &&
(subpass_ci.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
const auto src_at = subpass_ci.pDepthStencilAttachment->attachment;
const auto src_ci = attachment_ci[src_at];
// The formats are required to match so we can pick either
const bool resolve_depth = (ds_resolve->depthResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasDepth(src_ci.format);
const bool resolve_stencil = (ds_resolve->stencilResolveMode != VK_RESOLVE_MODE_NONE) && FormatHasStencil(src_ci.format);
const auto dst_at = ds_resolve->pDepthStencilResolveAttachment->attachment;
VkImageAspectFlags aspect_mask = 0u;
// Figure out which aspects are actually touched during resolve operations
const char *aspect_string = nullptr;
if (resolve_depth && resolve_stencil) {
// Validate all aspects together
aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
aspect_string = "depth/stencil";
} else if (resolve_depth) {
// Validate depth only
aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
aspect_string = "depth";
} else if (resolve_stencil) {
// Validate stencil only
aspect_mask = VK_IMAGE_ASPECT_STENCIL_BIT;
aspect_string = "stencil";
}
if (aspect_mask) {
action(aspect_string, "resolve read", src_at, dst_at, attachment_views[src_at],
SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ, SyncOrdering::kRaster, offset, extent, aspect_mask);
action(aspect_string, "resolve write", src_at, dst_at, attachment_views[dst_at],
SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster, offset, extent, aspect_mask);
}
}
}
// Action for validating resolve operations
class ValidateResolveAction {
public:
ValidateResolveAction(VkRenderPass render_pass, uint32_t subpass, const AccessContext &context,
const CommandExecutionContext &ex_context, const char *func_name)
: render_pass_(render_pass),
subpass_(subpass),
context_(context),
ex_context_(ex_context),
func_name_(func_name),
skip_(false) {}
void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
HazardResult hazard;
hazard = context_.DetectHazard(view, current_usage, ordering_rule, offset, extent, aspect_mask);
if (hazard.hazard) {
skip_ |=
ex_context_.GetSyncState().LogError(render_pass_, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s in subpass %" PRIu32 "during %s %s, from attachment %" PRIu32
" to resolve attachment %" PRIu32 ". Access info %s.",
func_name_, string_SyncHazard(hazard.hazard), subpass_, aspect_name,
attachment_name, src_at, dst_at, ex_context_.FormatUsage(hazard).c_str());
}
}
// Providing a mechanism for the constructing caller to get the result of the validation
bool GetSkip() const { return skip_; }
private:
VkRenderPass render_pass_;
const uint32_t subpass_;
const AccessContext &context_;
const CommandExecutionContext &ex_context_;
const char *func_name_;
bool skip_;
};
// Update action for resolve operations
class UpdateStateResolveAction {
public:
UpdateStateResolveAction(AccessContext &context, const ResourceUsageTag &tag) : context_(context), tag_(tag) {}
void operator()(const char *aspect_name, const char *attachment_name, uint32_t src_at, uint32_t dst_at,
const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask) {
// Ignores validation only arguments...
context_.UpdateAccessState(view, current_usage, ordering_rule, offset, extent, aspect_mask, tag_);
}
private:
AccessContext &context_;
const ResourceUsageTag &tag_;
};
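// Capture a snapshot of the conflicting access state so the error report can be formatted after detection.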
void HazardResult::Set(const ResourceAccessState *access_state_, SyncStageAccessIndex usage_index_, SyncHazard hazard_,
const SyncStageAccessFlags &prior_, const ResourceUsageTag &tag_) {
access_state = std::unique_ptr<const ResourceAccessState>(new ResourceAccessState(*access_state_));
usage_index = usage_index_;
hazard = hazard_;
prior_access = prior_;
tag = tag_;
}
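// Build the previous/async subpass links and external barriers for this subpass from the dependency graph.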
AccessContext::AccessContext(uint32_t subpass, VkQueueFlags queue_flags,
const std::vector<SubpassDependencyGraphNode> &dependencies,
const std::vector<AccessContext> &contexts, const AccessContext *external_context) {
Reset();
const auto &subpass_dep = dependencies[subpass];
prev_.reserve(subpass_dep.prev.size());
prev_by_subpass_.resize(subpass, nullptr); // Can't be more prevs than the subpass we're on
for (const auto &prev_dep : subpass_dep.prev) {
const auto prev_pass = prev_dep.first->pass;
const auto &prev_barriers = prev_dep.second;
assert(prev_dep.second.size());
prev_.emplace_back(&contexts[prev_pass], queue_flags, prev_barriers);
prev_by_subpass_[prev_pass] = &prev_.back();
}
async_.reserve(subpass_dep.async.size());
for (const auto async_subpass : subpass_dep.async) {
async_.emplace_back(&contexts[async_subpass]);
}
if (subpass_dep.barrier_from_external.size()) {
src_external_ = TrackBack(external_context, queue_flags, subpass_dep.barrier_from_external);
}
if (subpass_dep.barrier_to_external.size()) {
dst_external_ = TrackBack(this, queue_flags, subpass_dep.barrier_to_external);
}
}
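// Detect hazards in the portions of range not covered by this context, by first resolving prior accesses
// (with barriers applied) from the previous contexts in the DAG.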
template <typename Detector>
HazardResult AccessContext::DetectPreviousHazard(AccessAddressType type, const Detector &detector,
const ResourceAccessRange &range) const {
ResourceAccessRangeMap descent_map;
ResolvePreviousAccess(type, range, &descent_map, nullptr);
HazardResult hazard;
for (auto prev = descent_map.begin(); prev != descent_map.end() && !hazard.hazard; ++prev) {
hazard = detector.Detect(prev);
}
return hazard;
}
template <typename Action>
void AccessContext::ForAll(Action &&action) {
for (const auto address_type : kAddressTypes) {
auto &accesses = GetAccessStateMap(address_type);
for (const auto &access : accesses) {
action(address_type, access);
}
}
}
// A recursive range walker for hazard detection: first checks the current context, then recurs (via DetectPreviousHazard)
// to walk the DAG of contexts (for example, subpasses)
template <typename Detector>
HazardResult AccessContext::DetectHazard(AccessAddressType type, const Detector &detector, const ResourceAccessRange &range,
DetectOptions options) const {
HazardResult hazard;
if (static_cast<uint32_t>(options) & DetectOptions::kDetectAsync) {
// Async checks don't require recursive lookups, as the async lists are exhaustive for the top-level context
// so we'll check these first
for (const auto &async_context : async_) {
hazard = async_context->DetectAsyncHazard(type, detector, range);
if (hazard.hazard) return hazard;
}
}
const bool detect_prev = (static_cast<uint32_t>(options) & DetectOptions::kDetectPrevious) != 0;
const auto &accesses = GetAccessStateMap(type);
const auto from = accesses.lower_bound(range);
const auto to = accesses.upper_bound(range);
ResourceAccessRange gap = {range.begin, range.begin};
for (auto pos = from; pos != to; ++pos) {
if (detect_prev) {
// TODO: After profiling we may want to change the descent logic such that we don't recur per gap...
// Cover any leading gap, or gap between entries
gap.end = pos->first.begin; // We know this begin is < range.end
if (gap.non_empty()) {
// Recur on all gaps
hazard = DetectPreviousHazard(type, detector, gap);
if (hazard.hazard) return hazard;
}
// Set up for the next gap. If pos->first.end >= range.end, the loop will exit and the trailing gap will be empty
gap.begin = pos->first.end;
}
hazard = detector.Detect(pos);
if (hazard.hazard) return hazard;
}
if (detect_prev) {
// Detect in the trailing empty as needed
gap.end = range.end;
if (gap.non_empty()) {
hazard = DetectPreviousHazard(type, detector, gap);
}
}
return hazard;
}
// A non-recursive range walker for the asynchronous contexts (those we have no barriers with)
template <typename Detector>
HazardResult AccessContext::DetectAsyncHazard(AccessAddressType type, const Detector &detector,
const ResourceAccessRange &range) const {
auto &accesses = GetAccessStateMap(type);
const auto from = accesses.lower_bound(range);
const auto to = accesses.upper_bound(range);
HazardResult hazard;
for (auto pos = from; pos != to && !hazard.hazard; ++pos) {
hazard = detector.DetectAsync(pos, start_tag_);
}
return hazard;
}
struct ApplySubpassTransitionBarriersAction {
explicit ApplySubpassTransitionBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
void operator()(ResourceAccessState *access) const {
assert(access);
access->ApplyBarriers(barriers, true);
}
const std::vector<SyncBarrier> &barriers;
};
struct ApplyTrackbackBarriersAction {
explicit ApplyTrackbackBarriersAction(const std::vector<SyncBarrier> &barriers_) : barriers(barriers_) {}
void operator()(ResourceAccessState *access) const {
assert(access);
assert(!access->HasPendingState());
access->ApplyBarriers(barriers, false);
access->ApplyPendingBarriers(kCurrentCommandTag);
}
const std::vector<SyncBarrier> &barriers;
};
// Splits a single map entry into pieces matching the entries in [first, last). The total range over [first, last) must be
// contained within entry. Entry must be an iterator pointing into dest; first and last must be iterators pointing into a
// *different* map from dest.
// On exit, the entry covering the remainder of entry->first not included in the range [first, last) is left unresolved.
template <typename BarrierAction>
static void ResolveMapToEntry(ResourceAccessRangeMap *dest, ResourceAccessRangeMap::iterator entry,
ResourceAccessRangeMap::const_iterator first, ResourceAccessRangeMap::const_iterator last,
BarrierAction &barrier_action) {
auto at = entry;
for (auto pos = first; pos != last; ++pos) {
// Every member of the input iterator range must fit within the remaining portion of entry
assert(at->first.includes(pos->first));
assert(at != dest->end());
// Trim up at to the same size as the entry to resolve
at = sparse_container::split(at, *dest, pos->first);
auto access = pos->second; // intentional copy
barrier_action(&access);
at->second.Resolve(access);
++at; // Go to the remaining unused section of entry
}
}
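// Union the execution and access scopes of a barrier collection into a single equivalent barrier.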
static SyncBarrier MergeBarriers(const std::vector<SyncBarrier> &barriers) {
SyncBarrier merged = {};
for (const auto &barrier : barriers) {
merged.Merge(barrier);
}
return merged;
}
template <typename BarrierAction>
void AccessContext::ResolveAccessRange(AccessAddressType type, const ResourceAccessRange &range, BarrierAction &barrier_action,
ResourceAccessRangeMap *resolve_map, const ResourceAccessState *infill_state,
bool recur_to_infill) const {
if (!range.non_empty()) return;
ResourceRangeMergeIterator current(*resolve_map, GetAccessStateMap(type), range.begin);
while (current->range.non_empty() && range.includes(current->range.begin)) {
const auto current_range = current->range & range;
if (current->pos_B->valid) {
const auto &src_pos = current->pos_B->lower_bound;
auto access = src_pos->second; // intentional copy
barrier_action(&access);
if (current->pos_A->valid) {
const auto trimmed = sparse_container::split(current->pos_A->lower_bound, *resolve_map, current_range);
trimmed->second.Resolve(access);
current.invalidate_A(trimmed);
} else {
auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current_range, access));
current.invalidate_A(inserted); // Update the parallel iterator to point at the insert segment
}
} else {
// we have to descend to fill this gap
if (recur_to_infill) {
if (current->pos_A->valid) {
// Dest is valid, so we need to accumulate along the DAG and then resolve... in an N-to-1 resolve operation
ResourceAccessRangeMap gap_map;
ResolvePreviousAccess(type, current_range, &gap_map, infill_state);
ResolveMapToEntry(resolve_map, current->pos_A->lower_bound, gap_map.begin(), gap_map.end(), barrier_action);
} else {
// There isn't anything in dest in current_range, so we can accumulate directly into it.
ResolvePreviousAccess(type, current_range, resolve_map, infill_state);
// Need to apply the barrier to the accesses we accumulated, noting that we haven't updated current
for (auto pos = resolve_map->lower_bound(current_range); pos != current->pos_A->lower_bound; ++pos) {
barrier_action(&pos->second);
}
}
// Given that there could be gaps we need to seek carefully to not repeatedly search the same gaps in the next
// iteration of the outer while.
// Set the parallel iterator to the end of this range s.t. ++ will move us to the next range whether or
// not the end of the range is a gap. For the seek to work, first we need to warn the parallel iterator
// we stepped on the dest map
const auto seek_to = current_range.end - 1; // The subtraction is safe as range can't be empty (loop condition)
current.invalidate_A(); // Changes current->range
current.seek(seek_to);
} else if (!current->pos_A->valid && infill_state) {
// If we didn't find anything in the current range, and we aren't recursing... we infill if required
auto inserted = resolve_map->insert(current->pos_A->lower_bound, std::make_pair(current->range, *infill_state));
current.invalidate_A(inserted); // Update the parallel iterator to point at the correct segment after insert
}
}
++current;
}
// Infill if the range goes past both the current and resolve map prior contents
if (recur_to_infill && (current->range.end < range.end)) {
ResourceAccessRange trailing_fill_range = {current->range.end, range.end};
ResourceAccessRangeMap gap_map;
const auto the_end = resolve_map->end();
ResolvePreviousAccess(type, trailing_fill_range, &gap_map, infill_state);
for (auto &access : gap_map) {
barrier_action(&access.second);
resolve_map->insert(the_end, access);
}
}
}
void AccessContext::ResolvePreviousAccess(AccessAddressType type, const ResourceAccessRange &range,
ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
if ((prev_.size() == 0) && (src_external_.context == nullptr)) {
if (range.non_empty() && infill_state) {
descent_map->insert(std::make_pair(range, *infill_state));
}
} else {
// Look for something to fill the gap further along.
for (const auto &prev_dep : prev_) {
const ApplyTrackbackBarriersAction barrier_action(prev_dep.barriers);
prev_dep.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
}
if (src_external_.context) {
const ApplyTrackbackBarriersAction barrier_action(src_external_.barriers);
src_external_.context->ResolveAccessRange(type, range, barrier_action, descent_map, infill_state);
}
}
}
// Non-lazy import of all accesses, WaitEvents needs this.
void AccessContext::ResolvePreviousAccesses() {
ResourceAccessState default_state;
for (const auto address_type : kAddressTypes) {
ResolvePreviousAccess(address_type, kFullRange, &GetAccessStateMap(address_type), &default_state);
}
}
AccessAddressType AccessContext::ImageAddressType(const IMAGE_STATE &image) {
return (image.fragment_encoder->IsLinearImage()) ? AccessAddressType::kLinear : AccessAddressType::kIdealized;
}
static SyncStageAccessIndex ColorLoadUsage(VkAttachmentLoadOp load_op) {
const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ
: SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE;
return stage_access;
}
static SyncStageAccessIndex DepthStencilLoadUsage(VkAttachmentLoadOp load_op) {
const auto stage_access = (load_op == VK_ATTACHMENT_LOAD_OP_LOAD) ? SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ
: SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE;
return stage_access;
}
// Caller must manage returned pointer
static AccessContext *CreateStoreResolveProxyContext(const AccessContext &context, const RENDER_PASS_STATE &rp_state,
uint32_t subpass, const VkRect2D &render_area,
std::vector<const IMAGE_VIEW_STATE *> attachment_views) {
auto *proxy = new AccessContext(context);
proxy->UpdateAttachmentResolveAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
proxy->UpdateAttachmentStoreAccess(rp_state, render_area, attachment_views, subpass, kCurrentCommandTag);
return proxy;
}
template <typename BarrierAction>
class ResolveAccessRangeFunctor {
public:
ResolveAccessRangeFunctor(const AccessContext &context, AccessAddressType address_type, ResourceAccessRangeMap *descent_map,
const ResourceAccessState *infill_state, BarrierAction &barrier_action)
: context_(context),
address_type_(address_type),
descent_map_(descent_map),
infill_state_(infill_state),
barrier_action_(barrier_action) {}
ResolveAccessRangeFunctor() = delete;
void operator()(const ResourceAccessRange &range) const {
context_.ResolveAccessRange(address_type_, range, barrier_action_, descent_map_, infill_state_);
}
private:
const AccessContext &context_;
const AccessAddressType address_type_;
ResourceAccessRangeMap *const descent_map_;
const ResourceAccessState *infill_state_;
BarrierAction &barrier_action_;
};
template <typename BarrierAction>
void AccessContext::ResolveAccessRange(const IMAGE_STATE &image_state, const VkImageSubresourceRange &subresource_range,
BarrierAction &barrier_action, AccessAddressType address_type,
ResourceAccessRangeMap *descent_map, const ResourceAccessState *infill_state) const {
const ResolveAccessRangeFunctor<BarrierAction> action(*this, address_type, descent_map, infill_state, barrier_action);
ApplyOverImageRange(image_state, subresource_range, action);
}
// Layout transitions are handled as if they were occurring at the beginning of the next subpass
bool AccessContext::ValidateLayoutTransitions(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
const VkRect2D &render_area, uint32_t subpass,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
const char *func_name) const {
bool skip = false;
// As validation methods are const and precede the record/update phase, for any transitions from the immediately
// previous subpass, we have to validate them against a copy of the AccessContext, with resolve operations applied, as
// those effects have not been recorded yet.
//
// Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
// to apply and only copy then, if this proves a hot spot.
std::unique_ptr<AccessContext> proxy_for_prev;
TrackBack proxy_track_back;
const auto &transitions = rp_state.subpass_transitions[subpass];
for (const auto &transition : transitions) {
const bool prev_needs_proxy = transition.prev_pass != VK_SUBPASS_EXTERNAL && (transition.prev_pass + 1 == subpass);
const auto *track_back = GetTrackBackFromSubpass(transition.prev_pass);
if (prev_needs_proxy) {
if (!proxy_for_prev) {
proxy_for_prev.reset(CreateStoreResolveProxyContext(*track_back->context, rp_state, transition.prev_pass,
render_area, attachment_views));
proxy_track_back = *track_back;
proxy_track_back.context = proxy_for_prev.get();
}
track_back = &proxy_track_back;
}
auto hazard = DetectSubpassTransitionHazard(*track_back, attachment_views[transition.attachment]);
if (hazard.hazard) {
skip |= ex_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
" image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
func_name, string_SyncHazard(hazard.hazard), subpass, transition.attachment,
string_VkImageLayout(transition.old_layout),
string_VkImageLayout(transition.new_layout),
ex_context.FormatUsage(hazard).c_str());
}
}
return skip;
}
bool AccessContext::ValidateLoadOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
const VkRect2D &render_area, uint32_t subpass,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
const char *func_name) const {
bool skip = false;
const auto *attachment_ci = rp_state.createInfo.pAttachments;
VkExtent3D extent = CastTo3D(render_area.extent);
VkOffset3D offset = CastTo3D(render_area.offset);
for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
if (subpass == rp_state.attachment_first_subpass[i]) {
if (attachment_views[i] == nullptr) continue;
const IMAGE_VIEW_STATE &view = *attachment_views[i];
const IMAGE_STATE *image = view.image_state.get();
if (image == nullptr) continue;
const auto &ci = attachment_ci[i];
// Need to check in the following way
// 1) if the usage bit isn't in the dest_access_scope, and there is a layout transition for initial use, report hazard
// vs. transition
// 2) if there isn't a layout transition, we need to look at the external context with a "detect hazard" operation
// for each aspect loaded.
const bool has_depth = FormatHasDepth(ci.format);
const bool has_stencil = FormatHasStencil(ci.format);
const bool is_color = !(has_depth || has_stencil);
const SyncStageAccessIndex load_index = has_depth ? DepthStencilLoadUsage(ci.loadOp) : ColorLoadUsage(ci.loadOp);
const SyncStageAccessIndex stencil_load_index = has_stencil ? DepthStencilLoadUsage(ci.stencilLoadOp) : load_index;
HazardResult hazard;
const char *aspect = nullptr;
auto hazard_range = view.normalized_subresource_range;
bool checked_stencil = false;
if (is_color) {
hazard = DetectHazard(*image, load_index, view.normalized_subresource_range, SyncOrdering::kColorAttachment, offset,
extent);
aspect = "color";
} else {
if (has_depth) {
hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
hazard = DetectHazard(*image, load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset, extent);
aspect = "depth";
}
if (!hazard.hazard && has_stencil) {
hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
hazard = DetectHazard(*image, stencil_load_index, hazard_range, SyncOrdering::kDepthStencilAttachment, offset,
extent);
aspect = "stencil";
checked_stencil = true;
}
}
if (hazard.hazard) {
auto load_op_string = string_VkAttachmentLoadOp(checked_stencil ? ci.stencilLoadOp : ci.loadOp);
const auto &sync_state = ex_context.GetSyncState();
if (hazard.tag == kCurrentCommandTag) {
// Hazard vs. ILT
skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s vs. layout transition in subpass %" PRIu32 " for attachment %" PRIu32
" aspect %s during load with loadOp %s.",
func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string);
} else {
skip |= sync_state.LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
" aspect %s during load with loadOp %s. Access info %s.",
func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect, load_op_string,
ex_context.FormatUsage(hazard).c_str());
}
}
}
}
return skip;
}
// Store operation validation can ignore resolve (before it) and layout transitions after it. The first is ignored
// because of the ordering guarantees w.r.t. sample access, and because the resolve validation hasn't altered the state, as
// store is part of the same Next/End operation.
// The latter is handled in layout transition validation directly
bool AccessContext::ValidateStoreOperation(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
const VkRect2D &render_area, uint32_t subpass,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
const char *func_name) const {
bool skip = false;
const auto *attachment_ci = rp_state.createInfo.pAttachments;
VkExtent3D extent = CastTo3D(render_area.extent);
VkOffset3D offset = CastTo3D(render_area.offset);
for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
if (subpass == rp_state.attachment_last_subpass[i]) {
if (attachment_views[i] == nullptr) continue;
const IMAGE_VIEW_STATE &view = *attachment_views[i];
const IMAGE_STATE *image = view.image_state.get();
if (image == nullptr) continue;
const auto &ci = attachment_ci[i];
// The spec states that "don't care" is an operation with VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
// so we assume that an implementation is *free* to write in that case, meaning that for correctness
// sake, we treat DONT_CARE as writing.
const bool has_depth = FormatHasDepth(ci.format);
const bool has_stencil = FormatHasStencil(ci.format);
const bool is_color = !(has_depth || has_stencil);
const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
if (!has_stencil && !store_op_stores) continue;
HazardResult hazard;
const char *aspect = nullptr;
bool checked_stencil = false;
if (is_color) {
hazard = DetectHazard(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
view.normalized_subresource_range, SyncOrdering::kRaster, offset, extent);
aspect = "color";
} else {
const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
auto hazard_range = view.normalized_subresource_range;
if (has_depth && store_op_stores) {
hazard_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
SyncOrdering::kRaster, offset, extent);
aspect = "depth";
}
if (!hazard.hazard && has_stencil && stencil_op_stores) {
hazard_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
hazard = DetectHazard(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, hazard_range,
SyncOrdering::kRaster, offset, extent);
aspect = "stencil";
checked_stencil = true;
}
}
if (hazard.hazard) {
const char *const op_type_string = checked_stencil ? "stencilStoreOp" : "storeOp";
const char *const store_op_string = string_VkAttachmentStoreOp(checked_stencil ? ci.stencilStoreOp : ci.storeOp);
skip |= ex_context.GetSyncState().LogError(rp_state.renderPass, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s in subpass %" PRIu32 " for attachment %" PRIu32
" %s aspect during store with %s %s. Access info %s",
func_name, string_SyncHazard(hazard.hazard), subpass, i, aspect,
op_type_string, store_op_string, ex_context.FormatUsage(hazard).c_str());
}
}
}
return skip;
}
bool AccessContext::ValidateResolveOperations(const CommandExecutionContext &ex_context, const RENDER_PASS_STATE &rp_state,
const VkRect2D &render_area,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, const char *func_name,
uint32_t subpass) const {
ValidateResolveAction validate_action(rp_state.renderPass, subpass, *this, ex_context, func_name);
ResolveOperation(validate_action, rp_state, render_area, attachment_views, subpass);
return validate_action.GetSkip();
}
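// Detector functor for plain usage-vs-usage hazards (no ordering rules applied).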
class HazardDetector {
SyncStageAccessIndex usage_index_;
public:
HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const { return pos->second.DetectHazard(usage_index_); }
HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
return pos->second.DetectAsyncHazard(usage_index_, start_tag);
}
explicit HazardDetector(SyncStageAccessIndex usage) : usage_index_(usage) {}
};
class HazardDetectorWithOrdering {
const SyncStageAccessIndex usage_index_;
const SyncOrdering ordering_rule_;
public:
HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
return pos->second.DetectHazard(usage_index_, ordering_rule_);
}
HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
return pos->second.DetectAsyncHazard(usage_index_, start_tag);
}
HazardDetectorWithOrdering(SyncStageAccessIndex usage, SyncOrdering ordering) : usage_index_(usage), ordering_rule_(ordering) {}
};
HazardResult AccessContext::DetectHazard(const BUFFER_STATE &buffer, SyncStageAccessIndex usage_index,
const ResourceAccessRange &range) const {
if (!SimpleBinding(buffer)) return HazardResult();
const auto base_address = ResourceBaseAddress(buffer);
HazardDetector detector(usage_index);
return DetectHazard(AccessAddressType::kLinear, detector, (range + base_address), DetectOptions::kDetectAll);
}
template <typename Detector>
HazardResult AccessContext::DetectHazard(Detector &detector, const IMAGE_STATE &image,
const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
const VkExtent3D &extent, DetectOptions options) const {
if (!SimpleBinding(image)) return HazardResult();
const auto base_address = ResourceBaseAddress(image);
subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
base_address);
const auto address_type = ImageAddressType(image);
for (; range_gen->non_empty(); ++range_gen) {
HazardResult hazard = DetectHazard(address_type, detector, *range_gen, options);
if (hazard.hazard) return hazard;
}
return HazardResult();
}
HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
const VkExtent3D &extent) const {
VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
subresource.layerCount};
return DetectHazard(image, current_usage, subresource_range, offset, extent);
}
HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
const VkExtent3D &extent) const {
HazardDetector detector(current_usage);
return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
}
HazardResult AccessContext::DetectHazard(const IMAGE_STATE &image, SyncStageAccessIndex current_usage,
const VkImageSubresourceRange &subresource_range, SyncOrdering ordering_rule,
const VkOffset3D &offset, const VkExtent3D &extent) const {
HazardDetectorWithOrdering detector(current_usage, ordering_rule);
return DetectHazard(detector, image, subresource_range, offset, extent, DetectOptions::kDetectAll);
}
// Some common code for looking at attachments, if there's anything wrong, we return no hazard, core validation
// should have reported the issue regarding an invalid attachment entry
HazardResult AccessContext::DetectHazard(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage,
SyncOrdering ordering_rule, const VkOffset3D &offset, const VkExtent3D &extent,
VkImageAspectFlags aspect_mask) const {
if (view != nullptr) {
const IMAGE_STATE *image = view->image_state.get();
if (image != nullptr) {
auto *detect_range = &view->normalized_subresource_range;
VkImageSubresourceRange masked_range;
if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
masked_range = view->normalized_subresource_range;
masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
detect_range = &masked_range;
}
// NOTE: The range encoding code is not robust to invalid ranges, so we protect it from our change
if (detect_range->aspectMask) {
return DetectHazard(*image, current_usage, *detect_range, ordering_rule, offset, extent);
}
}
}
return HazardResult();
}
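// Detector functor for hazards against a barrier's source execution/access scopes (e.g. image layout transitions).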
class BarrierHazardDetector {
public:
BarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
SyncStageAccessFlags src_access_scope)
: usage_index_(usage_index), src_exec_scope_(src_exec_scope), src_access_scope_(src_access_scope) {}
HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_);
}
HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
// Async barrier hazard detection can use the same path, as the usage index is a write (not a read)
return pos->second.DetectAsyncHazard(usage_index_, start_tag);
}
private:
SyncStageAccessIndex usage_index_;
VkPipelineStageFlags src_exec_scope_;
SyncStageAccessFlags src_access_scope_;
};
class EventBarrierHazardDetector {
public:
EventBarrierHazardDetector(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
SyncStageAccessFlags src_access_scope, const SyncEventState::ScopeMap &event_scope,
const ResourceUsageTag &scope_tag)
: usage_index_(usage_index),
src_exec_scope_(src_exec_scope),
src_access_scope_(src_access_scope),
event_scope_(event_scope),
scope_pos_(event_scope.cbegin()),
scope_end_(event_scope.cend()),
scope_tag_(scope_tag) {}
HazardResult Detect(const ResourceAccessRangeMap::const_iterator &pos) const {
// TODO NOTE: This is almost the slowest way to do this... need to intelligently walk this...
// Need to find a more efficient sync, since we know pos->first is strictly increasing call to call
// NOTE: "cached_lower_bound_impl" with upgrades could do this.
if (scope_pos_ == scope_end_) return HazardResult();
if (!scope_pos_->first.intersects(pos->first)) {
// Advance the cached scope position to the first scope range at or beyond pos
scope_pos_ = event_scope_.lower_bound(pos->first);
if ((scope_pos_ == scope_end_) || !scope_pos_->first.intersects(pos->first)) return HazardResult();
}
}
// Some portion of this pos is in the event_scope, so check for a barrier hazard
return pos->second.DetectBarrierHazard(usage_index_, src_exec_scope_, src_access_scope_, scope_tag_);
}
HazardResult DetectAsync(const ResourceAccessRangeMap::const_iterator &pos, const ResourceUsageTag &start_tag) const {
// Async barrier hazard detection can use the same path, as the usage index is a write (not a read)
return pos->second.DetectAsyncHazard(usage_index_, start_tag);
}
private:
SyncStageAccessIndex usage_index_;
VkPipelineStageFlags src_exec_scope_;
SyncStageAccessFlags src_access_scope_;
const SyncEventState::ScopeMap &event_scope_;
mutable SyncEventState::ScopeMap::const_iterator scope_pos_;  // cached position, advanced from const Detect()
SyncEventState::ScopeMap::const_iterator scope_end_;
const ResourceUsageTag &scope_tag_;
};
HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
const SyncStageAccessFlags &src_access_scope,
const VkImageSubresourceRange &subresource_range,
const SyncEventState &sync_event, DetectOptions options) const {
// It's not particularly DRY to get the address type in this function as well as lower down, but we have to select the
// first access scope map to use, and there's no easy way to plumb it in below.
const auto address_type = ImageAddressType(image);
const auto &event_scope = sync_event.FirstScope(address_type);
EventBarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope,
event_scope, sync_event.first_scope_tag);
VkOffset3D zero_offset = {0, 0, 0};
return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
}
HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
const SyncStageAccessFlags &src_access_scope,
const VkImageSubresourceRange &subresource_range,
const DetectOptions options) const {
BarrierHazardDetector detector(SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION, src_exec_scope, src_access_scope);
VkOffset3D zero_offset = {0, 0, 0};
return DetectHazard(detector, image, subresource_range, zero_offset, image.createInfo.extent, options);
}
HazardResult AccessContext::DetectImageBarrierHazard(const IMAGE_STATE &image, VkPipelineStageFlags src_exec_scope,
const SyncStageAccessFlags &src_stage_accesses,
const VkImageMemoryBarrier &barrier) const {
auto subresource_range = NormalizeSubresourceRange(image.createInfo, barrier.subresourceRange);
const auto src_access_scope = SyncStageAccess::AccessScope(src_stage_accesses, barrier.srcAccessMask);
return DetectImageBarrierHazard(image, src_exec_scope, src_access_scope, subresource_range, kDetectAll);
}
HazardResult AccessContext::DetectImageBarrierHazard(const SyncImageMemoryBarrier &image_barrier) const {
return DetectImageBarrierHazard(*image_barrier.image.get(), image_barrier.barrier.src_exec_scope,
image_barrier.barrier.src_access_scope, image_barrier.range.subresource_range, kDetectAll);
}
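// Accumulate the scope bits for every flag set in flag_mask; the map is ordered by bit, so we can stop
// once the current map key exceeds the whole mask value.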
template <typename Flags, typename Map>
SyncStageAccessFlags AccessScopeImpl(Flags flag_mask, const Map &map) {
SyncStageAccessFlags scope = 0;
for (const auto &bit_scope : map) {
if (flag_mask < bit_scope.first) break;
if (flag_mask & bit_scope.first) {
scope |= bit_scope.second;
}
}
return scope;
}
SyncStageAccessFlags SyncStageAccess::AccessScopeByStage(VkPipelineStageFlags stages) {
return AccessScopeImpl(stages, syncStageAccessMaskByStageBit);
}
SyncStageAccessFlags SyncStageAccess::AccessScopeByAccess(VkAccessFlags accesses) {
return AccessScopeImpl(accesses, syncStageAccessMaskByAccessBit);
}
// Getting from stage mask and access mask to stage/access masks is something we need to be good at...
SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags stages, VkAccessFlags accesses) {
// The access scope is the intersection of all stage/access types possible for the enabled stages and the enabled
// accesses. (After factoring out common terms, the union of stage/access intersections is the intersection of the
// union of all stage/access types for all the stages with the same union for the access mask.)
return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
}
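// Apply action to every portion of range, splitting existing map entries at the range boundaries and
// infilling gaps (via action.Infill) so the whole range is covered.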
template <typename Action>
void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
// TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs READ usages
// that do incremental updates)
assert(accesses);
auto pos = accesses->lower_bound(range);
if (pos == accesses->end() || !pos->first.intersects(range)) {
// The queried range has no overlapping entries; infill the whole range with a default value.
pos = action.Infill(accesses, pos, range);
} else if (range.begin < pos->first.begin) {
// Leading empty space, infill
pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
} else if (pos->first.begin < range.begin) {
// Trim the beginning if needed
pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
++pos;
}
const auto the_end = accesses->end();
while ((pos != the_end) && pos->first.intersects(range)) {
if (pos->first.end > range.end) {
pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
}
pos = action(accesses, pos);
if (pos == the_end) break;
auto next = pos;
++next;
if ((pos->first.end < range.end) && (next != the_end) && !next->first.is_subsequent_to(pos->first)) {
// Need to infill if next is disjoint
VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
ResourceAccessRange new_range(pos->first.end, limit);
next = action.Infill(accesses, next, new_range);
}
pos = next;
}
}
// Give a comparable interface for range generators and ranges
template <typename Action>
inline void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, ResourceAccessRange *range) {
assert(range);
UpdateMemoryAccessState(accesses, *range, action);
}
template <typename Action, typename RangeGen>
void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const Action &action, RangeGen *range_gen_arg) {
assert(range_gen_arg);
RangeGen &range_gen = *range_gen_arg; // Non-const references must be * by style requirement but deref-ing * iterator is a pain
for (; range_gen->non_empty(); ++range_gen) {
UpdateMemoryAccessState(accesses, *range_gen, action);
}
}
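// Action for UpdateMemoryAccessState: infills gaps by resolving previous accesses along the context DAG,
// then stamps each range with the new usage/ordering/tag.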
struct UpdateMemoryAccessStateFunctor {
using Iterator = ResourceAccessRangeMap::iterator;
Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const {
// this is only called on gaps, and never returns a gap.
ResourceAccessState default_state;
context.ResolvePreviousAccess(type, range, accesses, &default_state);
return accesses->lower_bound(range);
}
Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
auto &access_state = pos->second;
access_state.Update(usage, ordering_rule, tag);
return pos;
}
UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
SyncOrdering ordering_rule_, const ResourceUsageTag &tag_)
: type(type_), context(context_), usage(usage_), ordering_rule(ordering_rule_), tag(tag_) {}
const AccessAddressType type;
const AccessContext &context;
const SyncStageAccessIndex usage;
const SyncOrdering ordering_rule;
const ResourceUsageTag &tag;
};
// The barrier operation for pipeline and subpass dependencies
struct PipelineBarrierOp {
SyncBarrier barrier;
bool layout_transition;
PipelineBarrierOp(const SyncBarrier &barrier_, bool layout_transition_)
: barrier(barrier_), layout_transition(layout_transition_) {}
PipelineBarrierOp() = default;
PipelineBarrierOp(const PipelineBarrierOp &) = default;
void operator()(ResourceAccessState *access_state) const { access_state->ApplyBarrier(barrier, layout_transition); }
};
// The barrier operation for wait events
struct WaitEventBarrierOp {
const ResourceUsageTag *scope_tag;
SyncBarrier barrier;
bool layout_transition;
WaitEventBarrierOp(const ResourceUsageTag &scope_tag_, const SyncBarrier &barrier_, bool layout_transition_)
: scope_tag(&scope_tag_), barrier(barrier_), layout_transition(layout_transition_) {}
WaitEventBarrierOp() = default;
void operator()(ResourceAccessState *access_state) const {
assert(scope_tag); // Not valid to have a non-scope op executed, default construct included for std::vector support
access_state->ApplyBarrier(*scope_tag, barrier, layout_transition);
}
};
// This functor applies a collection of barriers, updating the "pending state" in each touched memory range, and optionally
// resolves the pending state. Suitable for processing Global memory barriers, or Subpass Barriers when the "final" barrier
// of a collection is known/present.
template <typename BarrierOp>
class ApplyBarrierOpsFunctor {
public:
using Iterator = ResourceAccessRangeMap::iterator;
inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
auto &access_state = pos->second;
for (const auto &op : barrier_ops_) {
op(&access_state);
}
if (resolve_) {
// If this is the last (or only) batch, we can do the pending resolve as the last step in this operation to avoid
// another walk
access_state.ApplyPendingBarriers(tag_);
}
return pos;
}
// A valid tag is required IFF layout_transition is true, as transitions are write ops
ApplyBarrierOpsFunctor(bool resolve, size_t size_hint, const ResourceUsageTag &tag)
: resolve_(resolve), barrier_ops_(), tag_(tag) {
barrier_ops_.reserve(size_hint);
}
void EmplaceBack(const BarrierOp &op) { barrier_ops_.emplace_back(op); }
private:
bool resolve_;
std::vector<BarrierOp> barrier_ops_;
const ResourceUsageTag &tag_;
};
// This functor applies a single barrier, updating the "pending state" in each touched memory range, but does not
// resolve the pending state. Suitable for processing Image and Buffer barriers from PipelineBarriers or Events
template <typename BarrierOp>
class ApplyBarrierFunctor {
public:
using Iterator = ResourceAccessRangeMap::iterator;
inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
auto &access_state = pos->second;
barrier_op_(&access_state);
return pos;
}
ApplyBarrierFunctor(const BarrierOp &barrier_op) : barrier_op_(barrier_op) {}
private:
BarrierOp barrier_op_;
};
// This functor resolves the pending state.
class ResolvePendingBarrierFunctor {
public:
using Iterator = ResourceAccessRangeMap::iterator;
inline Iterator Infill(ResourceAccessRangeMap *accesses, Iterator pos, ResourceAccessRange range) const { return pos; }
Iterator operator()(ResourceAccessRangeMap *accesses, Iterator pos) const {
auto &access_state = pos->second;
access_state.ApplyPendingBarriers(tag_);
return pos;
}
ResolvePendingBarrierFunctor(const ResourceUsageTag &tag) : tag_(tag) {}
private:
const ResourceUsageTag &tag_;
};
void AccessContext::UpdateAccessState(AccessAddressType type, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
const ResourceAccessRange &range, const ResourceUsageTag &tag) {
UpdateMemoryAccessStateFunctor action(type, *this, current_usage, ordering_rule, tag);
UpdateMemoryAccessState(&GetAccessStateMap(type), range, action);
}
void AccessContext::UpdateAccessState(const BUFFER_STATE &buffer, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
const ResourceAccessRange &range, const ResourceUsageTag &tag) {
if (!SimpleBinding(buffer)) return;
const auto base_address = ResourceBaseAddress(buffer);
UpdateAccessState(AccessAddressType::kLinear, current_usage, ordering_rule, range + base_address, tag);
}
void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
const VkImageSubresourceRange &subresource_range, const VkOffset3D &offset,
const VkExtent3D &extent, const ResourceUsageTag &tag) {
if (!SimpleBinding(image)) return;
const auto base_address = ResourceBaseAddress(image);
subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, offset, extent,
base_address);
const auto address_type = ImageAddressType(image);
UpdateMemoryAccessStateFunctor action(address_type, *this, current_usage, ordering_rule, tag);
for (; range_gen->non_empty(); ++range_gen) {
UpdateMemoryAccessState(&GetAccessStateMap(address_type), *range_gen, action);
}
}
void AccessContext::UpdateAccessState(const IMAGE_VIEW_STATE *view, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
const VkOffset3D &offset, const VkExtent3D &extent, VkImageAspectFlags aspect_mask,
const ResourceUsageTag &tag) {
if (view != nullptr) {
const IMAGE_STATE *image = view->image_state.get();
if (image != nullptr) {
auto *update_range = &view->normalized_subresource_range;
VkImageSubresourceRange masked_range;
if (aspect_mask) { // If present and non-zero, restrict the normalized range to aspects present in aspect_mask
masked_range = view->normalized_subresource_range;
masked_range.aspectMask = aspect_mask & masked_range.aspectMask;
update_range = &masked_range;
}
UpdateAccessState(*image, current_usage, ordering_rule, *update_range, offset, extent, tag);
}
}
}
void AccessContext::UpdateAccessState(const IMAGE_STATE &image, SyncStageAccessIndex current_usage, SyncOrdering ordering_rule,
const VkImageSubresourceLayers &subresource, const VkOffset3D &offset,
const VkExtent3D &extent, const ResourceUsageTag &tag) {
VkImageSubresourceRange subresource_range = {subresource.aspectMask, subresource.mipLevel, 1, subresource.baseArrayLayer,
subresource.layerCount};
UpdateAccessState(image, current_usage, ordering_rule, subresource_range, offset, extent, tag);
}
template <typename Action>
void AccessContext::UpdateResourceAccess(const BUFFER_STATE &buffer, const ResourceAccessRange &range, const Action action) {
if (!SimpleBinding(buffer)) return;
const auto base_address = ResourceBaseAddress(buffer);
UpdateMemoryAccessState(&GetAccessStateMap(AccessAddressType::kLinear), (range + base_address), action);
}
template <typename Action>
void AccessContext::UpdateResourceAccess(const IMAGE_STATE &image, const VkImageSubresourceRange &subresource_range,
const Action action) {
if (!SimpleBinding(image)) return;
const auto address_type = ImageAddressType(image);
auto *accesses = &GetAccessStateMap(address_type);
const auto base_address = ResourceBaseAddress(image);
subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), subresource_range, {0, 0, 0},
image.createInfo.extent, base_address);
for (; range_gen->non_empty(); ++range_gen) {
UpdateMemoryAccessState(accesses, *range_gen, action);
}
}
void AccessContext::UpdateAttachmentResolveAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
const ResourceUsageTag &tag) {
UpdateStateResolveAction update(*this, tag);
ResolveOperation(update, rp_state, render_area, attachment_views, subpass);
}
void AccessContext::UpdateAttachmentStoreAccess(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views, uint32_t subpass,
const ResourceUsageTag &tag) {
const auto *attachment_ci = rp_state.createInfo.pAttachments;
VkExtent3D extent = CastTo3D(render_area.extent);
VkOffset3D offset = CastTo3D(render_area.offset);
for (uint32_t i = 0; i < rp_state.createInfo.attachmentCount; i++) {
if (rp_state.attachment_last_subpass[i] == subpass) {
if (attachment_views[i] == nullptr) continue; // UNUSED
const auto &view = *attachment_views[i];
const IMAGE_STATE *image = view.image_state.get();
if (image == nullptr) continue;
const auto &ci = attachment_ci[i];
const bool has_depth = FormatHasDepth(ci.format);
const bool has_stencil = FormatHasStencil(ci.format);
const bool is_color = !(has_depth || has_stencil);
const bool store_op_stores = ci.storeOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
if (is_color && store_op_stores) {
UpdateAccessState(*image, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE, SyncOrdering::kRaster,
view.normalized_subresource_range, offset, extent, tag);
} else {
auto update_range = view.normalized_subresource_range;
if (has_depth && store_op_stores) {
update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
update_range, offset, extent, tag);
}
const bool stencil_op_stores = ci.stencilStoreOp != VK_ATTACHMENT_STORE_OP_NONE_QCOM;
if (has_stencil && stencil_op_stores) {
update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
UpdateAccessState(*image, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, SyncOrdering::kRaster,
update_range, offset, extent, tag);
}
}
}
}
}
template <typename Action>
void AccessContext::ApplyToContext(const Action &barrier_action) {
// Note: Barriers do *not* cross context boundaries, applying only to accesses within... (at least for renderpass subpasses)
for (const auto address_type : kAddressTypes) {
UpdateMemoryAccessState(&GetAccessStateMap(address_type), kFullRange, barrier_action);
}
}
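// Hypothetical usage sketch (as done in RecordLayoutTransitions below): resolve all pending
// barriers across every address type of a context with a single call:
//
//   ResolvePendingBarrierFunctor apply_pending_action(tag);
//   access_context.ApplyToContext(apply_pending_action);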
void AccessContext::ResolveChildContexts(const std::vector<AccessContext> &contexts) {
for (uint32_t subpass_index = 0; subpass_index < contexts.size(); subpass_index++) {
auto &context = contexts[subpass_index];
ApplyTrackbackBarriersAction barrier_action(context.GetDstExternalTrackBack().barriers);
for (const auto address_type : kAddressTypes) {
context.ResolveAccessRange(address_type, kFullRange, barrier_action, &GetAccessStateMap(address_type), nullptr, false);
}
}
}
// Suitable only for *subpass* access contexts
HazardResult AccessContext::DetectSubpassTransitionHazard(const TrackBack &track_back, const IMAGE_VIEW_STATE *attach_view) const {
if (!attach_view) return HazardResult();
const auto image_state = attach_view->image_state.get();
if (!image_state) return HazardResult();
// We should never ask for a transition from a context we don't have
assert(track_back.context);
// Do the detection against the specific prior context independent of other contexts. (Synchronous only)
// Hazard detection for the transition can be against the merged of the barriers (it only uses src_...)
const auto merged_barrier = MergeBarriers(track_back.barriers);
HazardResult hazard =
track_back.context->DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope, merged_barrier.src_access_scope,
attach_view->normalized_subresource_range, kDetectPrevious);
if (!hazard.hazard) {
// The Async hazard check is against the current context's async set.
hazard = DetectImageBarrierHazard(*image_state, merged_barrier.src_exec_scope, merged_barrier.src_access_scope,
attach_view->normalized_subresource_range, kDetectAsync);
}
return hazard;
}
void AccessContext::RecordLayoutTransitions(const RENDER_PASS_STATE &rp_state, uint32_t subpass,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
const ResourceUsageTag &tag) {
const auto &transitions = rp_state.subpass_transitions[subpass];
const ResourceAccessState empty_infill;
for (const auto &transition : transitions) {
const auto prev_pass = transition.prev_pass;
const auto attachment_view = attachment_views[transition.attachment];
if (!attachment_view) continue;
const auto *image = attachment_view->image_state.get();
if (!image) continue;
if (!SimpleBinding(*image)) continue;
const auto *trackback = GetTrackBackFromSubpass(prev_pass);
assert(trackback);
// Import the attachments into the current context
const auto *prev_context = trackback->context;
assert(prev_context);
const auto address_type = ImageAddressType(*image);
auto &target_map = GetAccessStateMap(address_type);
ApplySubpassTransitionBarriersAction barrier_action(trackback->barriers);
prev_context->ResolveAccessRange(*image, attachment_view->normalized_subresource_range, barrier_action, address_type,
&target_map, &empty_infill);
}
// If there were no transitions skip this global map walk
if (transitions.size()) {
ResolvePendingBarrierFunctor apply_pending_action(tag);
ApplyToContext(apply_pending_action);
}
}
void CommandBufferAccessContext::ApplyGlobalBarriersToEvents(const SyncExecScope &src, const SyncExecScope &dst) {
const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
auto *events_context = GetCurrentEventsContext();
assert(events_context);
for (auto &event_pair : *events_context) {
assert(event_pair.second); // Shouldn't be storing empty
auto &sync_event = *event_pair.second;
// Events don't happen at a stage, so we need to check and store the unexpanded ALL_COMMANDS if set for inter-event calls
if ((sync_event.barriers & src.exec_scope) || all_commands_bit) {
sync_event.barriers |= dst.exec_scope;
sync_event.barriers |= dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
}
}
}
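// The scope test above, factored into a standalone sketch (hypothetical helper, not part of the layer):
//
//   bool EventInSrcScope(VkPipelineStageFlags event_barriers, const SyncExecScope &src) {
//       const bool all_commands_bit = 0 != (src.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
//       return all_commands_bit || (0 != (event_barriers & src.exec_scope));
//   }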
bool CommandBufferAccessContext::ValidateDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
const char *func_name) const {
bool skip = false;
const PIPELINE_STATE *pipe = nullptr;
const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
if (!pipe || !per_sets) {
return skip;
}
using DescriptorClass = cvdescriptorset::DescriptorClass;
using BufferDescriptor = cvdescriptorset::BufferDescriptor;
using ImageDescriptor = cvdescriptorset::ImageDescriptor;
using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
using TexelDescriptor = cvdescriptorset::TexelDescriptor;
for (const auto &stage_state : pipe->stage_state) {
if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
continue;
}
for (const auto &set_binding : stage_state.descriptor_uses) {
cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
set_binding.first.second);
const auto descriptor_type = binding_it.GetType();
cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
auto array_idx = 0;
if (binding_it.IsVariableDescriptorCount()) {
index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
}
SyncStageAccessIndex sync_index =
GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
uint32_t index = i - index_range.start;
const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
switch (descriptor->GetClass()) {
case DescriptorClass::ImageSampler:
case DescriptorClass::Image: {
const IMAGE_VIEW_STATE *img_view_state = nullptr;
VkImageLayout image_layout;
if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
const auto image_sampler_descriptor = static_cast<const ImageSamplerDescriptor *>(descriptor);
img_view_state = image_sampler_descriptor->GetImageViewState();
image_layout = image_sampler_descriptor->GetImageLayout();
} else {
const auto image_descriptor = static_cast<const ImageDescriptor *>(descriptor);
img_view_state = image_descriptor->GetImageViewState();
image_layout = image_descriptor->GetImageLayout();
}
if (!img_view_state) continue;
const IMAGE_STATE *img_state = img_view_state->image_state.get();
VkExtent3D extent = {};
VkOffset3D offset = {};
if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
} else {
extent = img_state->createInfo.extent;
}
HazardResult hazard;
const auto &subresource_range = img_view_state->normalized_subresource_range;
if (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
// Input attachments are subject to raster ordering rules
hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range,
SyncOrdering::kRaster, offset, extent);
} else {
hazard = current_context_->DetectHazard(*img_state, sync_index, subresource_range, offset, extent);
}
if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
skip |= sync_state_->LogError(
img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for %s, in %s, and %s, %s, type: %s, imageLayout: %s, binding #%" PRIu32
", index %" PRIu32 ". Access info %s.",
func_name, string_SyncHazard(hazard.hazard),
sync_state_->report_data->FormatHandle(img_view_state->image_view).c_str(),
sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
string_VkDescriptorType(descriptor_type), string_VkImageLayout(image_layout),
set_binding.first.second, index, FormatUsage(hazard).c_str());
}
break;
}
case DescriptorClass::TexelBuffer: {
auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
if (!buf_view_state) continue;
const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
const ResourceAccessRange range = MakeRange(*buf_view_state);
auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
skip |= sync_state_->LogError(
buf_view_state->buffer_view, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
func_name, string_SyncHazard(hazard.hazard),
sync_state_->report_data->FormatHandle(buf_view_state->buffer_view).c_str(),
sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
FormatUsage(hazard).c_str());
}
break;
}
case DescriptorClass::GeneralBuffer: {
const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
auto buf_state = buffer_descriptor->GetBufferState();
if (!buf_state) continue;
const ResourceAccessRange range =
MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
auto hazard = current_context_->DetectHazard(*buf_state, sync_index, range);
if (hazard.hazard && !sync_state_->SupressedBoundDescriptorWAW(hazard)) {
skip |= sync_state_->LogError(
buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for %s in %s, %s, and %s, type: %s, binding #%d index %d. Access info %s.",
func_name, string_SyncHazard(hazard.hazard),
sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(),
sync_state_->report_data->FormatHandle(pipe->pipeline).c_str(),
sync_state_->report_data->FormatHandle(descriptor_set->GetSet()).c_str(),
string_VkDescriptorType(descriptor_type), set_binding.first.second, index,
FormatUsage(hazard).c_str());
}
break;
}
// TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
default:
break;
}
}
}
}
return skip;
}
void CommandBufferAccessContext::RecordDispatchDrawDescriptorSet(VkPipelineBindPoint pipelineBindPoint,
const ResourceUsageTag &tag) {
const PIPELINE_STATE *pipe = nullptr;
const std::vector<LAST_BOUND_STATE::PER_SET> *per_sets = nullptr;
GetCurrentPipelineAndDesriptorSetsFromCommandBuffer(*cb_state_.get(), pipelineBindPoint, &pipe, &per_sets);
if (!pipe || !per_sets) {
return;
}
using DescriptorClass = cvdescriptorset::DescriptorClass;
using BufferDescriptor = cvdescriptorset::BufferDescriptor;
using ImageDescriptor = cvdescriptorset::ImageDescriptor;
using ImageSamplerDescriptor = cvdescriptorset::ImageSamplerDescriptor;
using TexelDescriptor = cvdescriptorset::TexelDescriptor;
for (const auto &stage_state : pipe->stage_state) {
if (stage_state.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT && pipe->graphicsPipelineCI.pRasterizationState &&
pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
continue;
}
for (const auto &set_binding : stage_state.descriptor_uses) {
cvdescriptorset::DescriptorSet *descriptor_set = (*per_sets)[set_binding.first.first].bound_descriptor_set;
cvdescriptorset::DescriptorSetLayout::ConstBindingIterator binding_it(descriptor_set->GetLayout().get(),
set_binding.first.second);
const auto descriptor_type = binding_it.GetType();
cvdescriptorset::IndexRange index_range = binding_it.GetGlobalIndexRange();
auto array_idx = 0;
if (binding_it.IsVariableDescriptorCount()) {
index_range.end = index_range.start + descriptor_set->GetVariableDescriptorCount();
}
SyncStageAccessIndex sync_index =
GetSyncStageAccessIndexsByDescriptorSet(descriptor_type, set_binding.second, stage_state.stage_flag);
for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) {
const auto *descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i);
switch (descriptor->GetClass()) {
case DescriptorClass::ImageSampler:
case DescriptorClass::Image: {
const IMAGE_VIEW_STATE *img_view_state = nullptr;
if (descriptor->GetClass() == DescriptorClass::ImageSampler) {
img_view_state = static_cast<const ImageSamplerDescriptor *>(descriptor)->GetImageViewState();
} else {
img_view_state = static_cast<const ImageDescriptor *>(descriptor)->GetImageViewState();
}
if (!img_view_state) continue;
const IMAGE_STATE *img_state = img_view_state->image_state.get();
VkExtent3D extent = {};
VkOffset3D offset = {};
if (sync_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ) {
extent = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.extent);
offset = CastTo3D(cb_state_->activeRenderPassBeginInfo.renderArea.offset);
} else {
extent = img_state->createInfo.extent;
}
SyncOrdering ordering_rule = (descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
? SyncOrdering::kRaster
: SyncOrdering::kNonAttachment;
current_context_->UpdateAccessState(*img_state, sync_index, ordering_rule,
img_view_state->normalized_subresource_range, offset, extent, tag);
break;
}
case DescriptorClass::TexelBuffer: {
auto buf_view_state = static_cast<const TexelDescriptor *>(descriptor)->GetBufferViewState();
if (!buf_view_state) continue;
const BUFFER_STATE *buf_state = buf_view_state->buffer_state.get();
const ResourceAccessRange range = MakeRange(*buf_view_state);
current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
break;
}
case DescriptorClass::GeneralBuffer: {
const auto *buffer_descriptor = static_cast<const BufferDescriptor *>(descriptor);
auto buf_state = buffer_descriptor->GetBufferState();
if (!buf_state) continue;
const ResourceAccessRange range =
MakeRange(*buf_state, buffer_descriptor->GetOffset(), buffer_descriptor->GetRange());
current_context_->UpdateAccessState(*buf_state, sync_index, SyncOrdering::kNonAttachment, range, tag);
break;
}
// TODO: INLINE_UNIFORM_BLOCK_EXT, ACCELERATION_STRUCTURE_KHR
default:
break;
}
}
}
}
}
bool CommandBufferAccessContext::ValidateDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const char *func_name) const {
bool skip = false;
const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
if (!pipe) {
return skip;
}
const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
const auto &binding_buffers_size = binding_buffers.size();
const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
for (size_t i = 0; i < binding_descriptions_size; ++i) {
const auto &binding_description = pipe->vertex_binding_descriptions_[i];
if (binding_description.binding < binding_buffers_size) {
const auto &binding_buffer = binding_buffers[binding_description.binding];
if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
auto *buf_state = binding_buffer.buffer_state.get();
const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
vertexCount, binding_description.stride);
auto hazard = current_context_->DetectHazard(*buf_state, SYNC_VERTEX_INPUT_VERTEX_ATTRIBUTE_READ, range);
if (hazard.hazard) {
skip |= sync_state_->LogError(
buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for vertex %s in %s. Access info %s.",
func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(buf_state->buffer).c_str(),
sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
}
}
}
return skip;
}
void CommandBufferAccessContext::RecordDrawVertex(uint32_t vertexCount, uint32_t firstVertex, const ResourceUsageTag &tag) {
const auto *pipe = GetCurrentPipelineFromCommandBuffer(*cb_state_.get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
if (!pipe) {
return;
}
const auto &binding_buffers = cb_state_->current_vertex_buffer_binding_info.vertex_buffer_bindings;
const auto &binding_buffers_size = binding_buffers.size();
const auto &binding_descriptions_size = pipe->vertex_binding_descriptions_.size();
for (size_t i = 0; i < binding_descriptions_size; ++i) {
const auto &binding_description = pipe->vertex_binding_descriptions_[i];
if (binding_description.binding < binding_buffers_size) {
const auto &binding_buffer = binding_buffers[binding_description.binding];
if (binding_buffer.buffer_state == nullptr || binding_buffer.buffer_state->destroyed) continue;
auto *buf_state = binding_buffer.buffer_state.get();
const ResourceAccessRange range = GetBufferRange(binding_buffer.offset, buf_state->createInfo.size, firstVertex,
vertexCount, binding_description.stride);
current_context_->UpdateAccessState(*buf_state, SYNC_VERTEX_INPUT_VERTEX_ATTRIBUTE_READ, SyncOrdering::kNonAttachment,
range, tag);
}
}
}
bool CommandBufferAccessContext::ValidateDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const char *func_name) const {
bool skip = false;
if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) {
return skip;
}
auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
firstIndex, indexCount, index_size);
auto hazard = current_context_->DetectHazard(*index_buf_state, SYNC_VERTEX_INPUT_INDEX_READ, range);
if (hazard.hazard) {
skip |= sync_state_->LogError(
index_buf_state->buffer, string_SyncHazardVUID(hazard.hazard), "%s: Hazard %s for index %s in %s. Access info %s.",
func_name, string_SyncHazard(hazard.hazard), sync_state_->report_data->FormatHandle(index_buf_state->buffer).c_str(),
sync_state_->report_data->FormatHandle(cb_state_->commandBuffer).c_str(), FormatUsage(hazard).c_str());
}
// TODO: For now, we validate against the whole vertex buffer. The index buffer contents could change up until
// queue submit. We will detect a more accurate range in the future.
skip |= ValidateDrawVertex(UINT32_MAX, 0, func_name);
return skip;
}
void CommandBufferAccessContext::RecordDrawVertexIndex(uint32_t indexCount, uint32_t firstIndex, const ResourceUsageTag &tag) {
if (cb_state_->index_buffer_binding.buffer_state == nullptr || cb_state_->index_buffer_binding.buffer_state->destroyed) return;
auto *index_buf_state = cb_state_->index_buffer_binding.buffer_state.get();
const auto index_size = GetIndexAlignment(cb_state_->index_buffer_binding.index_type);
const ResourceAccessRange range = GetBufferRange(cb_state_->index_buffer_binding.offset, index_buf_state->createInfo.size,
firstIndex, indexCount, index_size);
current_context_->UpdateAccessState(*index_buf_state, SYNC_VERTEX_INPUT_INDEX_READ, SyncOrdering::kNonAttachment, range, tag);
// TODO: For now, we validate against the whole vertex buffer. The index buffer contents could change up until
// queue submit. We will detect a more accurate range in the future.
RecordDrawVertex(UINT32_MAX, 0, tag);
}
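// GetIndexAlignment (defined elsewhere) is assumed to return the per-index byte size used in the
// range arithmetic above; a hypothetical equivalent:
//
//   uint32_t IndexSize(VkIndexType index_type) {
//       switch (index_type) {
//           case VK_INDEX_TYPE_UINT16: return 2;
//           case VK_INDEX_TYPE_UINT32: return 4;
//           case VK_INDEX_TYPE_UINT8_EXT: return 1;
//           default: return 4;  // conservative fallback
//       }
//   }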
bool CommandBufferAccessContext::ValidateDrawSubpassAttachment(const char *func_name) const {
bool skip = false;
if (!current_renderpass_context_) return skip;
skip |= current_renderpass_context_->ValidateDrawSubpassAttachment(GetExecutionContext(), *cb_state_.get(), func_name);
return skip;
}
void CommandBufferAccessContext::RecordDrawSubpassAttachment(const ResourceUsageTag &tag) {
if (current_renderpass_context_) {
current_renderpass_context_->RecordDrawSubpassAttachment(*cb_state_.get(), tag);
}
}
void CommandBufferAccessContext::RecordBeginRenderPass(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
const ResourceUsageTag &tag) {
// Create an access context for the current renderpass.
render_pass_contexts_.emplace_back(rp_state, render_area, GetQueueFlags(), attachment_views, &cb_access_context_);
current_renderpass_context_ = &render_pass_contexts_.back();
current_renderpass_context_->RecordBeginRenderPass(tag);
current_context_ = &current_renderpass_context_->CurrentContext();
}
void CommandBufferAccessContext::RecordNextSubpass(CMD_TYPE command) {
assert(current_renderpass_context_);
auto prev_tag = NextCommandTag(command);
auto next_tag = NextSubcommandTag(command);
current_renderpass_context_->RecordNextSubpass(prev_tag, next_tag);
current_context_ = &current_renderpass_context_->CurrentContext();
}
void CommandBufferAccessContext::RecordEndRenderPass(CMD_TYPE command) {
assert(current_renderpass_context_);
if (!current_renderpass_context_) return;
current_renderpass_context_->RecordEndRenderPass(&cb_access_context_, NextCommandTag(command));
current_context_ = &cb_access_context_;
current_renderpass_context_ = nullptr;
}
void CommandBufferAccessContext::RecordDestroyEvent(VkEvent event) {
// Erase is okay with the key not being in the map
const auto *event_state = sync_state_->Get<EVENT_STATE>(event);
if (event_state) {
GetCurrentEventsContext()->Destroy(event_state);
}
}
bool RenderPassAccessContext::ValidateDrawSubpassAttachment(const CommandExecutionContext &ex_context, const CMD_BUFFER_STATE &cmd,
const char *func_name) const {
bool skip = false;
const auto &sync_state = ex_context.GetSyncState();
const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
if (!pipe ||
(pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
return skip;
}
const auto &list = pipe->fragmentShader_writable_output_location_list;
const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
VkExtent3D extent = CastTo3D(render_area_.extent);
VkOffset3D offset = CastTo3D(render_area_.offset);
const auto &current_context = CurrentContext();
// The subpass's input attachments have already been validated in ValidateDispatchDrawDescriptorSet
if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
for (const auto location : list) {
if (location >= subpass.colorAttachmentCount ||
subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
continue;
}
const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
HazardResult hazard = current_context.DetectHazard(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
SyncOrdering::kColorAttachment, offset, extent);
if (hazard.hazard) {
skip |= sync_state.LogError(img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for %s in %s, Subpass #%d, and pColorAttachments #%d. Access info %s.",
func_name, string_SyncHazard(hazard.hazard),
sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
location, ex_context.FormatUsage(hazard).c_str());
}
}
}
// PHASE1 TODO: Add layout-based read vs. write selection.
// PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
bool depth_write = false, stencil_write = false;
// PHASE1 TODO: These validations should be in core_checks.
if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
depth_write = true;
}
// PHASE1 TODO: It needs to check if stencil is writable.
// If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
// If the depth test is disabled, it's treated as passing, and then depthFailOp doesn't run.
// PHASE1 TODO: These validations should be in core_checks.
if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
stencil_write = true;
}
// PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
if (depth_write) {
HazardResult hazard =
current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT);
if (hazard.hazard) {
skip |= sync_state.LogError(
img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for %s in %s, Subpass #%d, and depth part of pDepthStencilAttachment. Access info %s.",
func_name, string_SyncHazard(hazard.hazard),
sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
ex_context.FormatUsage(hazard).c_str());
}
}
if (stencil_write) {
HazardResult hazard =
current_context.DetectHazard(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT);
if (hazard.hazard) {
skip |= sync_state.LogError(
img_view_state->image_view, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for %s in %s, Subpass #%d, and stencil part of pDepthStencilAttachment. Access info %s.",
func_name, string_SyncHazard(hazard.hazard),
sync_state.report_data->FormatHandle(img_view_state->image_view).c_str(),
sync_state.report_data->FormatHandle(cmd.commandBuffer).c_str(), cmd.activeSubpass,
ex_context.FormatUsage(hazard).c_str());
}
}
}
return skip;
}
void RenderPassAccessContext::RecordDrawSubpassAttachment(const CMD_BUFFER_STATE &cmd, const ResourceUsageTag &tag) {
const auto *pipe = GetCurrentPipelineFromCommandBuffer(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
if (!pipe ||
(pipe->graphicsPipelineCI.pRasterizationState && pipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable)) {
return;
}
const auto &list = pipe->fragmentShader_writable_output_location_list;
const auto &subpass = rp_state_->createInfo.pSubpasses[current_subpass_];
VkExtent3D extent = CastTo3D(render_area_.extent);
VkOffset3D offset = CastTo3D(render_area_.offset);
auto &current_context = CurrentContext();
// The subpass's input attachments have already been recorded in RecordDispatchDrawDescriptorSet
if (subpass.pColorAttachments && subpass.colorAttachmentCount && !list.empty()) {
for (const auto location : list) {
if (location >= subpass.colorAttachmentCount ||
subpass.pColorAttachments[location].attachment == VK_ATTACHMENT_UNUSED) {
continue;
}
const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pColorAttachments[location].attachment];
current_context.UpdateAccessState(img_view_state, SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE,
SyncOrdering::kColorAttachment, offset, extent, 0, tag);
}
}
// PHASE1 TODO: Add layout-based read vs. write selection.
// PHASE1 TODO: Read operations for both depth and stencil are possible in the future.
if (pipe->graphicsPipelineCI.pDepthStencilState && subpass.pDepthStencilAttachment &&
subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const IMAGE_VIEW_STATE *img_view_state = attachment_views_[subpass.pDepthStencilAttachment->attachment];
bool depth_write = false, stencil_write = false;
// PHASE1 TODO: These validations should be in core_checks.
if (!FormatIsStencilOnly(img_view_state->create_info.format) &&
pipe->graphicsPipelineCI.pDepthStencilState->depthTestEnable &&
pipe->graphicsPipelineCI.pDepthStencilState->depthWriteEnable &&
IsImageLayoutDepthWritable(subpass.pDepthStencilAttachment->layout)) {
depth_write = true;
}
// PHASE1 TODO: It needs to check if stencil is writable.
// If failOp, passOp, or depthFailOp are not KEEP, and writeMask isn't 0, it's writable.
// If the depth test is disabled, it's treated as passing, and then depthFailOp doesn't run.
// PHASE1 TODO: These validations should be in core_checks.
if (!FormatIsDepthOnly(img_view_state->create_info.format) &&
pipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable &&
IsImageLayoutStencilWritable(subpass.pDepthStencilAttachment->layout)) {
stencil_write = true;
}
// PHASE1 TODO: Add EARLY stage detection based on ExecutionMode.
if (depth_write) {
current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_DEPTH_BIT,
tag);
}
if (stencil_write) {
current_context.UpdateAccessState(img_view_state, SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE,
SyncOrdering::kDepthStencilAttachment, offset, extent, VK_IMAGE_ASPECT_STENCIL_BIT,
tag);
}
}
}
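// Sketch of the stencil-writability refinement described in the PHASE1 TODOs above (hypothetical,
// assuming the standard VkStencilOpState fields):
//
//   bool StencilOpStateWrites(const VkStencilOpState &s) {
//       const bool ops_write = (s.failOp != VK_STENCIL_OP_KEEP) || (s.passOp != VK_STENCIL_OP_KEEP) ||
//                              (s.depthFailOp != VK_STENCIL_OP_KEEP);
//       return ops_write && (s.writeMask != 0);
//   }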
bool RenderPassAccessContext::ValidateNextSubpass(const CommandExecutionContext &ex_context, const char *func_name) const {
// PHASE1 TODO: Add Validate Preserve attachments
bool skip = false;
skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
current_subpass_);
skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
func_name);
const auto next_subpass = current_subpass_ + 1;
const auto &next_context = subpass_contexts_[next_subpass];
skip |=
next_context.ValidateLayoutTransitions(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
if (!skip) {
// To avoid complex (and buggy) duplication of the effect of layout transitions on load operations, we'll record them
// on a copy of the (empty) next context.
// Note: The resource access map should be empty so hopefully this copy isn't too horrible from a perf POV.
AccessContext temp_context(next_context);
temp_context.RecordLayoutTransitions(*rp_state_, next_subpass, attachment_views_, kCurrentCommandTag);
skip |=
temp_context.ValidateLoadOperation(ex_context, *rp_state_, render_area_, next_subpass, attachment_views_, func_name);
}
return skip;
}
bool RenderPassAccessContext::ValidateEndRenderPass(const CommandExecutionContext &ex_context, const char *func_name) const {
// PHASE1 TODO: Validate Preserve
bool skip = false;
skip |= CurrentContext().ValidateResolveOperations(ex_context, *rp_state_, render_area_, attachment_views_, func_name,
current_subpass_);
skip |= CurrentContext().ValidateStoreOperation(ex_context, *rp_state_, render_area_, current_subpass_, attachment_views_,
func_name);
skip |= ValidateFinalSubpassLayoutTransitions(ex_context, func_name);
return skip;
}
AccessContext *RenderPassAccessContext::CreateStoreResolveProxy() const {
return CreateStoreResolveProxyContext(CurrentContext(), *rp_state_, current_subpass_, render_area_, attachment_views_);
}
bool RenderPassAccessContext::ValidateFinalSubpassLayoutTransitions(const CommandExecutionContext &ex_context,
const char *func_name) const {
bool skip = false;
// As validation methods are const and precede the record/update phase, for any transitions from the current (last)
// subpass, we have to validate them against a copy of the current AccessContext, with resolve operations applied.
// Note: we could be more efficient by tracking whether or not we actually *have* any changes (e.g. attachment resolve)
// to apply and only copy then, if this proves a hot spot.
std::unique_ptr<AccessContext> proxy_for_current;
// Validate the "finalLayout" transitions to external.
// Get them from where they're hiding in the extra entry.
const auto &final_transitions = rp_state_->subpass_transitions.back();
for (const auto &transition : final_transitions) {
const auto &attach_view = attachment_views_[transition.attachment];
const auto &trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
assert(trackback.context); // Transitions are given implicit transitions if the StateTracker is working correctly
auto *context = trackback.context;
if (transition.prev_pass == current_subpass_) {
if (!proxy_for_current) {
// We haven't recorded resolve operations for the current subpass, so we need to copy current and update it *as if* we had
proxy_for_current.reset(CreateStoreResolveProxy());
}
context = proxy_for_current.get();
}
// Use the merged barrier for the hazard check (safe since it just considers the src (first) scope).
const auto merged_barrier = MergeBarriers(trackback.barriers);
auto hazard = context->DetectImageBarrierHazard(*attach_view->image_state, merged_barrier.src_exec_scope,
merged_barrier.src_access_scope, attach_view->normalized_subresource_range,
AccessContext::DetectOptions::kDetectPrevious);
if (hazard.hazard) {
skip |= ex_context.GetSyncState().LogError(
rp_state_->renderPass, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s with last use subpass %" PRIu32 " for attachment %" PRIu32
" final image layout transition (old_layout: %s, new_layout: %s). Access info %s.",
func_name, string_SyncHazard(hazard.hazard), transition.prev_pass, transition.attachment,
string_VkImageLayout(transition.old_layout), string_VkImageLayout(transition.new_layout),
ex_context.FormatUsage(hazard).c_str());
}
}
return skip;
}
void RenderPassAccessContext::RecordLayoutTransitions(const ResourceUsageTag &tag) {
// Add layout transitions...
subpass_contexts_[current_subpass_].RecordLayoutTransitions(*rp_state_, current_subpass_, attachment_views_, tag);
}
void RenderPassAccessContext::RecordLoadOperations(const ResourceUsageTag &tag) {
const auto *attachment_ci = rp_state_->createInfo.pAttachments;
auto &subpass_context = subpass_contexts_[current_subpass_];
VkExtent3D extent = CastTo3D(render_area_.extent);
VkOffset3D offset = CastTo3D(render_area_.offset);
for (uint32_t i = 0; i < rp_state_->createInfo.attachmentCount; i++) {
if (rp_state_->attachment_first_subpass[i] == current_subpass_) {
if (attachment_views_[i] == nullptr) continue; // UNUSED
const auto &view = *attachment_views_[i];
const IMAGE_STATE *image = view.image_state.get();
if (image == nullptr) continue;
const auto &ci = attachment_ci[i];
const bool has_depth = FormatHasDepth(ci.format);
const bool has_stencil = FormatHasStencil(ci.format);
const bool is_color = !(has_depth || has_stencil);
if (is_color) {
subpass_context.UpdateAccessState(*image, ColorLoadUsage(ci.loadOp), SyncOrdering::kColorAttachment,
view.normalized_subresource_range, offset, extent, tag);
} else {
auto update_range = view.normalized_subresource_range;
if (has_depth) {
update_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.loadOp),
SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
}
if (has_stencil) {
update_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
subpass_context.UpdateAccessState(*image, DepthStencilLoadUsage(ci.stencilLoadOp),
SyncOrdering::kDepthStencilAttachment, update_range, offset, extent, tag);
}
}
}
}
}
RenderPassAccessContext::RenderPassAccessContext(const RENDER_PASS_STATE &rp_state, const VkRect2D &render_area,
VkQueueFlags queue_flags,
const std::vector<const IMAGE_VIEW_STATE *> &attachment_views,
const AccessContext *external_context)
: rp_state_(&rp_state), render_area_(render_area), current_subpass_(0U), attachment_views_(attachment_views) {
// Add contexts for all subpasses here so that they exist during next-subpass validation
subpass_contexts_.reserve(rp_state_->createInfo.subpassCount);
for (uint32_t pass = 0; pass < rp_state_->createInfo.subpassCount; pass++) {
subpass_contexts_.emplace_back(pass, queue_flags, rp_state_->subpass_dependencies, subpass_contexts_, external_context);
}
}
void RenderPassAccessContext::RecordBeginRenderPass(const ResourceUsageTag &tag) {
assert(0 == current_subpass_);
subpass_contexts_[current_subpass_].SetStartTag(tag);
RecordLayoutTransitions(tag);
RecordLoadOperations(tag);
}
void RenderPassAccessContext::RecordNextSubpass(const ResourceUsageTag &prev_subpass_tag,
const ResourceUsageTag &next_subpass_tag) {
// Resolves are against *prior* subpass context and thus *before* the subpass increment
CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, prev_subpass_tag);
CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, prev_subpass_tag);
// Move to the next sub-command for the new subpass. The resolve and store are logically part of the previous
// subpass, so their tag needs to be different from the layout and load operations below.
current_subpass_++;
assert(current_subpass_ < subpass_contexts_.size());
subpass_contexts_[current_subpass_].SetStartTag(next_subpass_tag);
RecordLayoutTransitions(next_subpass_tag);
RecordLoadOperations(next_subpass_tag);
}
void RenderPassAccessContext::RecordEndRenderPass(AccessContext *external_context, const ResourceUsageTag &tag) {
// Add the resolve and store accesses
CurrentContext().UpdateAttachmentResolveAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, tag);
CurrentContext().UpdateAttachmentStoreAccess(*rp_state_, render_area_, attachment_views_, current_subpass_, tag);
// Export the accesses from the renderpass...
external_context->ResolveChildContexts(subpass_contexts_);
// Add the "finalLayout" transitions to external.
// Get them from where they're hiding in the extra entry.
// Note that since *final* always comes from *one* subpass per view, we don't have to accumulate the barriers.
// TODO: With aliasing we may need to reconsider barrier accumulation... though I don't know that it would be valid for
// aliasing that had multiple final layout transitions from multiple final subpasses.
const auto &final_transitions = rp_state_->subpass_transitions.back();
for (const auto &transition : final_transitions) {
const auto &attachment = attachment_views_[transition.attachment];
const auto &last_trackback = subpass_contexts_[transition.prev_pass].GetDstExternalTrackBack();
assert(&subpass_contexts_[transition.prev_pass] == last_trackback.context);
ApplyBarrierOpsFunctor<PipelineBarrierOp> barrier_action(true /* resolve */, last_trackback.barriers.size(), tag);
for (const auto &barrier : last_trackback.barriers) {
barrier_action.EmplaceBack(PipelineBarrierOp(barrier, true));
}
external_context->UpdateResourceAccess(*attachment->image_state, attachment->normalized_subresource_range, barrier_action);
}
}
SyncExecScope SyncExecScope::MakeSrc(VkQueueFlags queue_flags, VkPipelineStageFlags mask_param) {
SyncExecScope result;
result.mask_param = mask_param;
result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
result.exec_scope = sync_utils::WithEarlierPipelineStages(result.expanded_mask);
result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
return result;
}
SyncExecScope SyncExecScope::MakeDst(VkQueueFlags queue_flags, VkPipelineStageFlags mask_param) {
SyncExecScope result;
result.mask_param = mask_param;
result.expanded_mask = sync_utils::ExpandPipelineStages(mask_param, queue_flags);
result.exec_scope = sync_utils::WithLaterPipelineStages(result.expanded_mask);
result.valid_accesses = SyncStageAccess::AccessScopeByStage(result.exec_scope);
return result;
}
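// Hypothetical usage sketch: building the two halves of a barrier's execution scope (queue_flags
// restricts stage expansion to stages the queue supports; the mask names are illustrative):
//
//   auto src = SyncExecScope::MakeSrc(queue_flags, src_stage_mask);
//   auto dst = SyncExecScope::MakeDst(queue_flags, dst_stage_mask);
//   SyncBarrier exec_only(src, dst);  // access scopes are zero -- a pure execution barrier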
SyncBarrier::SyncBarrier(const SyncExecScope &src, const SyncExecScope &dst) {
src_exec_scope = src.exec_scope;
src_access_scope = 0;
dst_exec_scope = dst.exec_scope;
dst_access_scope = 0;
}
template <typename Barrier>
SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst) {
src_exec_scope = src.exec_scope;
src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask);
dst_exec_scope = dst.exec_scope;
dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask);
}
SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const VkSubpassDependency2 &subpass) {
auto src = SyncExecScope::MakeSrc(queue_flags, subpass.srcStageMask);
src_exec_scope = src.exec_scope;
src_access_scope = SyncStageAccess::AccessScope(src.valid_accesses, subpass.srcAccessMask);
auto dst = SyncExecScope::MakeDst(queue_flags, subpass.dstStageMask);
dst_exec_scope = dst.exec_scope;
dst_access_scope = SyncStageAccess::AccessScope(dst.valid_accesses, subpass.dstAccessMask);
}
// Apply a list of barriers, without resolving pending state, useful for subpass layout transitions
void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, bool layout_transition) {
for (const auto &barrier : barriers) {
ApplyBarrier(barrier, layout_transition);
}
}
// ApplyBarriers is designed for *fully* inclusive barrier lists without layout transitions. The designed use was for
// inter-subpass barriers for lazy evaluation of parent context memory ranges. Subpass layout transitions are *not* done
// lazily, s.t. no previous access reports should need layout transitions.
void ResourceAccessState::ApplyBarriers(const std::vector<SyncBarrier> &barriers, const ResourceUsageTag &tag) {
assert(!pending_layout_transition); // This should never be called in the middle of another barrier application
assert(pending_write_barriers.none());
assert(!pending_write_dep_chain);
for (const auto &barrier : barriers) {
ApplyBarrier(barrier, false);
}
ApplyPendingBarriers(tag);
}
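// Equivalent unrolled form of the overload above (sketch): each barrier is staged as pending so
// the barriers in the list stay independent, then all are committed at once:
//
//   for (const auto &barrier : barriers) state.ApplyBarrier(barrier, false /* layout_transition */);
//   state.ApplyPendingBarriers(tag);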
HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index) const {
HazardResult hazard;
auto usage = FlagBit(usage_index);
const auto usage_stage = PipelineStageBit(usage_index);
if (IsRead(usage)) {
if (IsRAWHazard(usage_stage, usage)) {
hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
}
} else {
// Write operation:
// Check for read operations more recent than last_write (as setting last_write clears reads, that would be *any* reads).
// If reads exist -- test only against them, because either:
// * the reads were hazards, and we've reported the hazard, so just test the current write vs. the read operations
// * the reads weren't hazards, and thus if the write is safe w.r.t. the reads, no hazard vs. last_write is possible if
//   the current write happens after the reads, so just test the write against the reads
// Otherwise test against last_write
//
// Look for casus belli for WAR
if (last_reads.size()) {
for (const auto &read_access : last_reads) {
if (IsReadHazard(usage_stage, read_access)) {
hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
break;
}
}
} else if (last_write.any() && IsWriteHazard(usage)) {
// Write-After-Write check -- if we have a previous write to test against
hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
}
}
return hazard;
}
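// Decision summary for the simple check above, as pseudocode:
//
//   if (IsRead(usage))        RAW  iff IsRAWHazard(usage_stage, usage)
//   else if (last_reads)      WAR  iff any read satisfies IsReadHazard(usage_stage, read)
//   else                      WAW  iff last_write.any() && IsWriteHazard(usage)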
HazardResult ResourceAccessState::DetectHazard(SyncStageAccessIndex usage_index, const SyncOrdering &ordering_rule) const {
const auto &ordering = GetOrderingRules(ordering_rule);
// The ordering guarantees act as barriers to the last accesses, independent of synchronization operations
HazardResult hazard;
const auto usage_bit = FlagBit(usage_index);
const auto usage_stage = PipelineStageBit(usage_index);
const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
const bool last_write_is_ordered = (last_write & ordering.access_scope).any();
if (IsRead(usage_bit)) {
// Exclude RAW if there's no write, or the write isn't the most recent operation w.r.t. usage;
bool is_raw_hazard = IsRAWHazard(usage_stage, usage_bit);
if (is_raw_hazard) {
// NOTE: we know last_write is non-zero
// See if the ordering rules save us from the simple RAW check above
// First check to see if the current usage is covered by the ordering rules
const bool usage_is_input_attachment = (usage_index == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ);
const bool usage_is_ordered =
(input_attachment_ordering && usage_is_input_attachment) || (0 != (usage_stage & ordering.exec_scope));
if (usage_is_ordered) {
// Now see if the most recent write (or a subsequent read) is ordered
const bool most_recent_is_ordered = last_write_is_ordered || (0 != GetOrderedStages(ordering));
is_raw_hazard = !most_recent_is_ordered;
}
}
if (is_raw_hazard) {
hazard.Set(this, usage_index, READ_AFTER_WRITE, last_write, write_tag);
}
} else {
// Only check for WAW if there are no reads since last_write
bool usage_write_is_ordered = (usage_bit & ordering.access_scope).any();
if (last_reads.size()) {
// Look for any WAR hazards outside the ordered set of stages
VkPipelineStageFlags ordered_stages = 0;
if (usage_write_is_ordered) {
// If the usage is ordered, we can ignore all ordered read stages w.r.t. WAR
ordered_stages = GetOrderedStages(ordering);
}
// If we're tracking any reads that aren't ordered against the current write, we've got to check them all.
if ((ordered_stages & last_read_stages) != last_read_stages) {
for (const auto &read_access : last_reads) {
if (read_access.stage & ordered_stages) continue; // but we can skip the ordered ones
if (IsReadHazard(usage_stage, read_access)) {
hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
break;
}
}
}
} else if (!(last_write_is_ordered && usage_write_is_ordered)) {
if (last_write.any() && IsWriteHazard(usage_bit)) {
hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
}
}
}
return hazard;
}
// Asynchronous Hazards occur between subpasses with no connection through the DAG
HazardResult ResourceAccessState::DetectAsyncHazard(SyncStageAccessIndex usage_index, const ResourceUsageTag &start_tag) const {
HazardResult hazard;
auto usage = FlagBit(usage_index);
// Async checks need to not go back further than the start of the subpass, as we only want to find hazards between the async
// subpasses. Anything older than that should have been checked at the start of each subpass, taking into account all of
// the raster ordering rules.
if (IsRead(usage)) {
if (last_write.any() && (write_tag.index >= start_tag.index)) {
hazard.Set(this, usage_index, READ_RACING_WRITE, last_write, write_tag);
}
} else {
if (last_write.any() && (write_tag.index >= start_tag.index)) {
hazard.Set(this, usage_index, WRITE_RACING_WRITE, last_write, write_tag);
} else if (last_reads.size() > 0) {
// Any reads during the other subpass will conflict with this write, so we need to check them all.
for (const auto &read_access : last_reads) {
if (read_access.tag.index >= start_tag.index) {
hazard.Set(this, usage_index, WRITE_RACING_READ, read_access.access, read_access.tag);
break;
}
}
}
}
return hazard;
}
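// The start-tag window test above, in isolation (hypothetical helper): any access tagged at or
// after the async subpass's start tag races with the current subpass:
//
//   bool RacesAsyncStart(const ResourceUsageTag &access_tag, const ResourceUsageTag &start_tag) {
//       return access_tag.index >= start_tag.index;
//   }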
HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
const SyncStageAccessFlags &src_access_scope) const {
// Only supporting image layout transitions for now
assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
HazardResult hazard;
// Only test for WAW if there are no intervening read operations.
// See DetectHazard(SyncStageAccessIndex) above for more details.
if (last_reads.size()) {
// Look at the reads if any
for (const auto &read_access : last_reads) {
if (read_access.IsReadBarrierHazard(src_exec_scope)) {
hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
break;
}
}
} else if (last_write.any() && IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
}
return hazard;
}
HazardResult ResourceAccessState::DetectBarrierHazard(SyncStageAccessIndex usage_index, VkPipelineStageFlags src_exec_scope,
const SyncStageAccessFlags &src_access_scope,
const ResourceUsageTag &event_tag) const {
// Only supporting image layout transitions for now
assert(usage_index == SyncStageAccessIndex::SYNC_IMAGE_LAYOUT_TRANSITION);
HazardResult hazard;
// Only test for WAW if there are no intervening read operations.
// See DetectHazard(SyncStageAccessIndex) above for more details.
if (last_reads.size()) {
// Look at the reads if any... if reads exist, they are either the reason the access is in the event's
// first scope, or they are a hazard.
for (const auto &read_access : last_reads) {
if (read_access.tag.IsBefore(event_tag)) {
// The read is in the event's first synchronization scope, so we use a barrier hazard check
// If the read stage is not in the src sync scope
// *AND* not execution chained with an existing sync barrier (that's the or)
// then the barrier access is unsafe (R/W after R)
if (read_access.IsReadBarrierHazard(src_exec_scope)) {
hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
break;
}
} else {
// The read is not in the event's first sync scope, and so is a hazard vs. the layout transition
hazard.Set(this, usage_index, WRITE_AFTER_READ, read_access.access, read_access.tag);
}
}
} else if (last_write.any()) {
// If there are no reads, the write is either the reason the access is in the event scope... or it is a hazard
if (write_tag.IsBefore(event_tag)) {
// The write is in the first sync scope of the event (since there aren't any reads to be the reason)
// So do a normal barrier hazard check
if (IsWriteBarrierHazard(src_exec_scope, src_access_scope)) {
hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
}
} else {
// The write isn't in scope, and is thus a hazard to the layout transition for the wait
hazard.Set(this, usage_index, WRITE_AFTER_WRITE, last_write, write_tag);
}
}
return hazard;
}
// The logic behind resolves is the same as update: we assume that earlier hazards have been reported, and that no
// transitive hazard can exist with a hazard between the earlier operations. Yes, an early hazard can mask that another
// exists, but if you fix *that* hazard it either fixes or unmasks the subsequent ones.
void ResourceAccessState::Resolve(const ResourceAccessState &other) {
if (write_tag.IsBefore(other.write_tag)) {
// If this is a later write, we've reported any existing hazard, and we can just overwrite as the more recent
// operation
*this = other;
} else if (!other.write_tag.IsBefore(write_tag)) {
// This is the *equals* case for write operations; we merge the write barriers and the read state (but without the
// dependency chaining logic or any stage expansion)
write_barriers |= other.write_barriers;
pending_write_barriers |= other.pending_write_barriers;
pending_layout_transition |= other.pending_layout_transition;
pending_write_dep_chain |= other.pending_write_dep_chain;
// Merge the read states
const auto pre_merge_count = last_reads.size();
const auto pre_merge_stages = last_read_stages;
for (uint32_t other_read_index = 0; other_read_index < other.last_reads.size(); other_read_index++) {
auto &other_read = other.last_reads[other_read_index];
if (pre_merge_stages & other_read.stage) {
// Merge in the barriers for read stages that exist in *both* this and other
// TODO: This is N^2 with stages... perhaps the ReadStates should be sorted by stage index.
// but we should wait on profiling data for that.
for (uint32_t my_read_index = 0; my_read_index < pre_merge_count; my_read_index++) {
auto &my_read = last_reads[my_read_index];
if (other_read.stage == my_read.stage) {
if (my_read.tag.IsBefore(other_read.tag)) {
// Other is more recent, copy in the state
my_read.access = other_read.access;
my_read.tag = other_read.tag;
my_read.pending_dep_chain = other_read.pending_dep_chain;
// TODO: Phase 2 -- review the state merge logic to avoid false positive from overwriting the barriers
// May require tracking more than one access per stage.
my_read.barriers = other_read.barriers;
if (my_read.stage == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
// Since I'm overwriting the fragment stage read, also update the input attachment info
// as this is the only stage that affects it.
input_attachment_read = other.input_attachment_read;
}
} else if (!other_read.tag.IsBefore(my_read.tag)) {
// The read tags match (neither is before the other), so merge the barriers
my_read.barriers |= other_read.barriers;
my_read.pending_dep_chain |= other_read.pending_dep_chain;
}
break;
}
}
} else {
// The other read stage doesn't exist in this, so add it.
last_reads.emplace_back(other_read);
last_read_stages |= other_read.stage;
if (other_read.stage == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
input_attachment_read = other.input_attachment_read;
}
}
}
read_execution_barriers |= other.read_execution_barriers;
} // the else clause would be that other write is before this write... in which case we supersede the other state and
// ignore it.
// Merge first access information by making a copy of this first_access and reconstructing with a shuffle
// of the copy and other into this using the update-first logic.
// NOTE: All sorts of additional cleverness could be put into short circuits. (for example, back is write and is before
// front of the other first_accesses...)
if (!(first_accesses_ == other.first_accesses_) && !other.first_accesses_.empty()) {
FirstAccesses firsts(std::move(first_accesses_));
first_accesses_.clear();
first_read_stages_ = 0U;
auto a = firsts.begin();
auto a_end = firsts.end();
for (auto &b : other.first_accesses_) {
// TODO: Determine whether "IsBefore" or "IsGloballyBefore" is needed...
while (a != a_end && a->tag.IsBefore(b.tag)) {
UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
++a;
}
UpdateFirst(b.tag, b.usage_index, b.ordering_rule);
}
for (; a != a_end; ++a) {
UpdateFirst(a->tag, a->usage_index, a->ordering_rule);
}
}
}
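// Merge walkthrough (illustrative, not normative): if both states carry a VERTEX_SHADER read with tags
// t1 and t2 where t1.IsBefore(t2), the t2 read state wins wholesale; a FRAGMENT_SHADER read present
// only in `other` is appended, and its input_attachment_read flag is adopted, since the fragment
// stage is the only one that affects that flag.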
void ResourceAccessState::Update(SyncStageAccessIndex usage_index, SyncOrdering ordering_rule, const ResourceUsageTag &tag) {
// TODO: Move this logic into the ResourceStateTracker as methods thereof (or we'll repeat it for every flavor of resource...)
const auto usage_bit = FlagBit(usage_index);
if (IsRead(usage_index)) {
// Multiple outstanding reads may be of interest and do dependency chains independently
// However, for purposes of barrier tracking, only one read per pipeline stage matters
const auto usage_stage = PipelineStageBit(usage_index);
if (usage_stage & last_read_stages) {
for (auto &read_access : last_reads) {
if (read_access.stage == usage_stage) {
read_access.Set(usage_stage, usage_bit, 0, tag);
break;
}
}
} else {
last_reads.emplace_back(usage_stage, usage_bit, 0, tag);
last_read_stages |= usage_stage;
}
// Fragment shader reads come in two flavors, and we need to track if the one we're tracking is the special one.
if (usage_stage == VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
// TODO Revisit re: multiple reads for a given stage
input_attachment_read = (usage_bit == SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT);
}
} else {
// Assume write
// TODO determine what to do with READ-WRITE operations if any
SetWrite(usage_bit, tag);
}
UpdateFirst(tag, usage_index, ordering_rule);
}
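// Effect summary (illustrative): a second read at a stage already in last_read_stages replaces that
// stage's ReadState in place (one tracked read per stage), while any write funnels through SetWrite
// below and clears all read tracking.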
// Clobber last read and all barriers... because all we have is DANGER, DANGER, WILL ROBINSON!!!
// if the last_reads/last_write were unsafe, we've reported them; in either case the prior access is irrelevant.
// We can overwrite them as *this* write is now after them.
//
// Note: intentionally ignore pending barriers and chains (i.e. don't apply or clear them), let ApplyPendingBarriers handle them.
void ResourceAccessState::SetWrite(const SyncStageAccessFlags &usage_bit, const ResourceUsageTag &tag) {
last_reads.clear();
last_read_stages = 0;
read_execution_barriers = 0;
input_attachment_read = false; // Denotes no outstanding input attachment read after the last write.
write_barriers = 0;
write_dependency_chain = 0;
write_tag = tag;
last_write = usage_bit;
}
// Apply the memory barrier without updating the existing barriers. The execution barrier
// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
// of the batch have been processed. Also, depending on whether layout transition happens, we'll either
// replace the current write barriers or add to them, so accumulate to pending as well.
void ResourceAccessState::ApplyBarrier(const SyncBarrier &barrier, bool layout_transition) {
// For independent barriers we need to track what the new barriers and dependency chain *will* be when we're done
// applying the memory barriers
// NOTE: We update the write barrier if the write is in the first access scope or if there is a layout
// transition, under the theory of "most recent access". If the read/write *isn't* safe
// vs. this layout transition, DetectBarrierHazard should report it. We treat the layout
// transition *as* a write and in scope with the barrier (it's before visibility).
if (layout_transition || WriteInSourceScopeOrChain(barrier.src_exec_scope, barrier.src_access_scope)) {
pending_write_barriers |= barrier.dst_access_scope;
pending_write_dep_chain |= barrier.dst_exec_scope;
}
// Track layout transition as pending, as we can't modify last_write until all barriers are processed
pending_layout_transition |= layout_transition;
if (!pending_layout_transition) {
// Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
// don't need to be tracked as we're just going to zero them.
for (auto &read_access : last_reads) {
// The | implements the "dependency chain" logic for this access, as the barriers field stores the second sync scope
if (barrier.src_exec_scope & (read_access.stage | read_access.barriers)) {
read_access.pending_dep_chain |= barrier.dst_exec_scope;
}
}
}
}
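// Example (illustrative): with last_write == TRANSFER_WRITE inside the barrier's source scope, a
// barrier {src: TRANSFER / TRANSFER_WRITE, dst: FRAGMENT_SHADER / SHADER_READ} accumulates
// SHADER_READ into pending_write_barriers and the fragment stage into pending_write_dep_chain;
// nothing is committed until ApplyPendingBarriers runs.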
// Apply the tag scoped memory barrier without updating the existing barriers. The execution barrier
// changes the "chaining" state, but to keep barriers independent we defer that update. See discussion above.
void ResourceAccessState::ApplyBarrier(const ResourceUsageTag &scope_tag, const SyncBarrier &barrier, bool layout_transition) {
// The scope logic for events is: if we're here, the resource usage was flagged as "in the first execution scope" at
// the time of the SetEvent, thus all we need to check is whether the access is the same one (i.e. before the scope tag)
// in order to know if it's in the execution scope.
// Notice that the layout transition sets the pending barriers *regardless*, as any lack of src_access_scope to
// guard against the layout transition should be reported in the detect barrier hazard phase, and we only report
// errors w.r.t. "most recent" accesses.
if (layout_transition || ((write_tag.IsBefore(scope_tag)) && (barrier.src_access_scope & last_write).any())) {
pending_write_barriers |= barrier.dst_access_scope;
pending_write_dep_chain |= barrier.dst_exec_scope;
}
// Track layout transition as pending, as we can't modify last_write until all barriers are processed
pending_layout_transition |= layout_transition;
if (!pending_layout_transition) {
// Once we're dealing with a layout transition (which is modelled as a *write*) then the last reads/writes/chains
// don't need to be tracked as we're just going to zero them.
for (auto &read_access : last_reads) {
// If this read is the same one we included in the set event and in scope, then apply the execution barrier...
// NOTE: That's not really correct... this read stage might *not* have been included in the setevent, and the barriers
// representing the chain might have changed since then (that would be an odd usage), so as a first approximation
// we'll assume the barriers *haven't* been changed since (if the tag hasn't), and while this could be a false
// positive in the case of Set; SomeBarrier; Wait; we'll live with it until we can add more state to the first scope
// capture (the specific write and read stages that *were* in scope at the moment of SetEvents).
// TODO: eliminate the false positive by including write/read-stages "in scope" information in SetEvents first_scope
if (read_access.tag.IsBefore(scope_tag) && (barrier.src_exec_scope & (read_access.stage | read_access.barriers))) {
read_access.pending_dep_chain |= barrier.dst_exec_scope;
}
}
}
}
void ResourceAccessState::ApplyPendingBarriers(const ResourceUsageTag &tag) {
if (pending_layout_transition) {
// SetWrite clobbers the read state, and thus we don't have to clear it out here.
SetWrite(SYNC_IMAGE_LAYOUT_TRANSITION_BIT, tag); // Side effect notes below
UpdateFirst(tag, SYNC_IMAGE_LAYOUT_TRANSITION, SyncOrdering::kNonAttachment);
pending_layout_transition = false;
}
// Apply the accumulated execution barriers (and thus update chaining information)
// For a layout transition, the read state is cleared by SetWrite above, so this loop is skipped.
for (auto &read_access : last_reads) {
read_access.barriers |= read_access.pending_dep_chain;
read_execution_barriers |= read_access.barriers;
read_access.pending_dep_chain = 0;
}
// We OR in the accumulated write chain and barriers even in the case of a layout transition as SetWrite zeros them.
write_dependency_chain |= pending_write_dep_chain;
write_barriers |= pending_write_barriers;
pending_write_dep_chain = 0;
pending_write_barriers = 0;
}
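// Batch semantics (illustrative): two memory barriers recorded in one vkCmdPipelineBarrier are each
// evaluated by ApplyBarrier against the *pre-batch* state, so neither can chain off the other; this
// call then commits their combined effect in a single step.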
// This should be just Bits or Index, but we don't have an invalid state for Index
VkPipelineStageFlags ResourceAccessState::GetReadBarriers(const SyncStageAccessFlags &usage_bit) const {
VkPipelineStageFlags barriers = 0U;
for (const auto &read_access : last_reads) {
if ((read_access.access & usage_bit).any()) {
barriers = read_access.barriers;
break;
}
}
return barriers;
}
inline bool ResourceAccessState::IsRAWHazard(VkPipelineStageFlagBits usage_stage, const SyncStageAccessFlags &usage) const {
assert(IsRead(usage));
// Only RAW vs. last_write if it doesn't happen-after any other read because either:
// * the previous reads are not hazards, and thus last_write must be visible and available to
// any reads that happen after.
// * the previous reads *are* hazards to last_write, have been reported, and if that hazard is fixed
//   the current read will also not be a hazard, thus reporting a hazard here adds no needed information.
return last_write.any() && (0 == (read_execution_barriers & usage_stage)) && IsWriteHazard(usage);
}
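// Worked example (illustrative): after SetWrite(SYNC_TRANSFER_TRANSFER_WRITE, ...), a subsequent
// SYNC_VERTEX_SHADER_SHADER_READ with no intervening barrier sees read_execution_barriers == 0 and
// an unguarded last_write, so this returns true and the caller reports READ_AFTER_WRITE.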
VkPipelineStageFlags ResourceAccessState::GetOrderedStages(const OrderingBarrier &ordering) const {
// Whether the stages are in the ordering scope only matters if the current write is ordered
VkPipelineStageFlags ordered_stages = last_read_stages & ordering.exec_scope;
// Special input attachment handling as always (not encoded in exec_scope)
const bool input_attachment_ordering = (ordering.access_scope & SYNC_FRAGMENT_SHADER_INPUT_ATTACHMENT_READ_BIT).any();
if (input_attachment_ordering && input_attachment_read) {
// If we have an input attachment in last_reads and input attachments are ordered, we add that stage
ordered_stages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
return ordered_stages;
}
void ResourceAccessState::UpdateFirst(const ResourceUsageTag &tag, SyncStageAccessIndex usage_index, SyncOrdering ordering_rule) {
// Only record until we record a write.
if (first_accesses_.empty() || IsRead(first_accesses_.back().usage_index)) {
const VkPipelineStageFlags usage_stage =
IsRead(usage_index) ? static_cast<VkPipelineStageFlags>(PipelineStageBit(usage_index)) : 0U;
if (0 == (usage_stage & first_read_stages_)) {
// If this is a read we haven't seen or a write, record.
first_read_stages_ |= usage_stage;
first_accesses_.emplace_back(tag, usage_index, ordering_rule);
}
}
}
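// Recording example (illustrative): for the access sequence read@VERTEX, read@VERTEX, read@FRAGMENT,
// write, read@COMPUTE this stores {read@VERTEX, read@FRAGMENT, write}; duplicate read stages are
// filtered via first_read_stages_ and recording stops once a write has been recorded.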
void SyncValidator::ResetCommandBufferCallback(VkCommandBuffer command_buffer) {
auto *access_context = GetAccessContextNoInsert(command_buffer);
if (access_context) {
access_context->Reset();
}
}
void SyncValidator::FreeCommandBufferCallback(VkCommandBuffer command_buffer) {
auto access_found = cb_access_state.find(command_buffer);
if (access_found != cb_access_state.end()) {
access_found->second->Reset();
cb_access_state.erase(access_found);
}
}
bool SyncValidator::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferCopy *pRegions) const {
bool skip = false;
const auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return skip;
const auto *context = cb_context->GetCurrentAccessContext();
// If we have no previous accesses, we have no hazards
const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
for (uint32_t region = 0; region < regionCount; region++) {
const auto ©_region = pRegions[region];
if (src_buffer) {
const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
if (hazard.hazard) {
skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
"vkCmdCopyBuffer: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
cb_context->FormatUsage(hazard).c_str());
}
}
if (dst_buffer && !skip) {
const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
if (hazard.hazard) {
skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
"vkCmdCopyBuffer: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
cb_context->FormatUsage(hazard).c_str());
}
}
if (skip) break;
}
return skip;
}
void SyncValidator::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferCopy *pRegions) {
auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER);
auto *context = cb_context->GetCurrentAccessContext();
const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
for (uint32_t region = 0; region < regionCount; region++) {
const auto ©_region = pRegions[region];
if (src_buffer) {
const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
}
if (dst_buffer) {
const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
}
}
}
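// Illustrative application-side sketch (hypothetical handles `cmd`, `src_a`, `src_b`, `dst`; not part
// of this layer): two copies that write the same destination range with no intervening barrier are
// exactly the WRITE_AFTER_WRITE case the validation above reports.
#if 0
VkBufferCopy region{/*srcOffset*/ 0, /*dstOffset*/ 0, /*size*/ 256};
vkCmdCopyBuffer(cmd, src_a, dst, 1, ®ion);  // first TRANSFER_WRITE to dst [0, 256)
vkCmdCopyBuffer(cmd, src_b, dst, 1, ®ion);  // overlapping write -> SYNC-HAZARD-WRITE_AFTER_WRITE
#endif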
void SyncValidator::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
// Clear out events from the command buffer contexts
for (auto &cb_context : cb_access_state) {
cb_context.second->RecordDestroyEvent(event);
}
}
bool SyncValidator::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
bool skip = false;
const auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return skip;
const auto *context = cb_context->GetCurrentAccessContext();
// If we have no previous accesses, we have no hazards
const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
const auto ©_region = pCopyBufferInfos->pRegions[region];
if (src_buffer) {
const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
auto hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
if (hazard.hazard) {
// TODO -- add tag information to log msg when useful.
skip |= LogError(pCopyBufferInfos->srcBuffer, string_SyncHazardVUID(hazard.hazard),
"vkCmdCopyBuffer2KHR(): Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->srcBuffer).c_str(),
region, cb_context->FormatUsage(hazard).c_str());
}
}
if (dst_buffer && !skip) {
const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
if (hazard.hazard) {
skip |= LogError(pCopyBufferInfos->dstBuffer, string_SyncHazardVUID(hazard.hazard),
"vkCmdCopyBuffer2KHR(): Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyBufferInfos->dstBuffer).c_str(),
region, cb_context->FormatUsage(hazard).c_str());
}
}
if (skip) break;
}
return skip;
}
void SyncValidator::PreCallRecordCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR *pCopyBufferInfos) {
auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
const auto tag = cb_context->NextCommandTag(CMD_COPYBUFFER2KHR);
auto *context = cb_context->GetCurrentAccessContext();
const auto *src_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->srcBuffer);
const auto *dst_buffer = Get<BUFFER_STATE>(pCopyBufferInfos->dstBuffer);
for (uint32_t region = 0; region < pCopyBufferInfos->regionCount; region++) {
const auto ©_region = pCopyBufferInfos->pRegions[region];
if (src_buffer) {
const ResourceAccessRange src_range = MakeRange(*src_buffer, copy_region.srcOffset, copy_region.size);
context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
}
if (dst_buffer) {
const ResourceAccessRange dst_range = MakeRange(*dst_buffer, copy_region.dstOffset, copy_region.size);
context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
}
}
}
bool SyncValidator::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *src_image = Get<IMAGE_STATE>(srcImage);
const auto *dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto ©_region = pRegions[region];
if (src_image) {
auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
copy_region.srcOffset, copy_region.extent);
if (hazard.hazard) {
skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
"vkCmdCopyImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
}
if (dst_image) {
VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
copy_region.dstOffset, dst_copy_extent);
if (hazard.hazard) {
skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
"vkCmdCopyImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
if (skip) break;
}
}
return skip;
}
void SyncValidator::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) {
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
auto *src_image = Get<IMAGE_STATE>(srcImage);
auto *dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto ©_region = pRegions[region];
if (src_image) {
context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
}
if (dst_image) {
VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
}
}
}
bool SyncValidator::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer,
const VkCopyImageInfo2KHR *pCopyImageInfo) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
const auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
const auto ©_region = pCopyImageInfo->pRegions[region];
if (src_image) {
auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.srcSubresource,
copy_region.srcOffset, copy_region.extent);
if (hazard.hazard) {
skip |= LogError(pCopyImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
"vkCmdCopyImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->srcImage).c_str(),
region, cb_access_context->FormatUsage(hazard).c_str());
}
}
if (dst_image) {
VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.dstSubresource,
copy_region.dstOffset, dst_copy_extent);
if (hazard.hazard) {
skip |= LogError(pCopyImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
"vkCmdCopyImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(pCopyImageInfo->dstImage).c_str(),
region, cb_access_context->FormatUsage(hazard).c_str());
}
if (skip) break;
}
}
return skip;
}
void SyncValidator::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_COPYIMAGE2KHR);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
auto *src_image = Get<IMAGE_STATE>(pCopyImageInfo->srcImage);
auto *dst_image = Get<IMAGE_STATE>(pCopyImageInfo->dstImage);
for (uint32_t region = 0; region < pCopyImageInfo->regionCount; region++) {
const auto ©_region = pCopyImageInfo->pRegions[region];
if (src_image) {
context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
copy_region.srcSubresource, copy_region.srcOffset, copy_region.extent, tag);
}
if (dst_image) {
VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_image->createInfo.format, dst_image->createInfo.format, copy_region.extent);
context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
copy_region.dstSubresource, copy_region.dstOffset, dst_copy_extent, tag);
}
}
}
bool SyncValidator::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
skip = pipeline_barrier.Validate(*cb_access_context);
return skip;
}
void SyncValidator::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return;
SyncOpPipelineBarrier pipeline_barrier(CMD_PIPELINEBARRIER, *this, cb_access_context->GetQueueFlags(), srcStageMask,
dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
pipeline_barrier.Record(cb_access_context);
}
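// Illustrative fix for a transfer/transfer hazard such as the copy/copy WAW sketched earlier
// (hypothetical handles `cmd`, `dst`): a TRANSFER->TRANSFER dependency making the first write
// available and visible to the second transfer access, which Record() above models by updating the
// destination range's write barriers and dependency chain.
#if 0
VkBufferMemoryBarrier barrier = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER};
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.buffer = dst;
barrier.offset = 0;
barrier.size = VK_WHOLE_SIZE;
vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
                     0, nullptr, 1, &barrier, 0, nullptr);
#endif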
void SyncValidator::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
// The state tracker sets up the device state
StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
// Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker
// refactor would be messier without.
// TODO: Find a good way to do this hooklessly.
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeSyncValidation);
SyncValidator *sync_device_state = static_cast<SyncValidator *>(validation_data);
sync_device_state->SetCommandBufferResetCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
sync_device_state->ResetCommandBufferCallback(command_buffer);
});
sync_device_state->SetCommandBufferFreeCallback([sync_device_state](VkCommandBuffer command_buffer) -> void {
sync_device_state->FreeCommandBufferCallback(command_buffer);
});
}
bool SyncValidator::ValidateBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd, const char *cmd_name) const {
bool skip = false;
auto cb_context = GetAccessContext(commandBuffer);
if (cb_context) {
SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo, cmd_name);
skip = sync_op.Validate(*cb_context);
}
return skip;
}
bool SyncValidator::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) const {
bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
subpass_begin_info.contents = contents;
skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
return skip;
}
bool SyncValidator::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) const {
bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
skip |= ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
return skip;
}
static const char *kBeginRenderPass2KhrName = "vkCmdBeginRenderPass2KHR";
bool SyncValidator::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) const {
bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
skip |=
ValidateBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2, kBeginRenderPass2KhrName);
return skip;
}
void SyncValidator::PostCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo,
VkResult result) {
// The state tracker sets up the command buffer state
StateTracker::PostCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo, result);
// Create/initialize the structure that tracks accesses at the command buffer scope.
auto cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
cb_access_context->Reset();
}
void SyncValidator::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo, CMD_TYPE cmd, const char *cmd_name) {
auto cb_context = GetAccessContext(commandBuffer);
if (cb_context) {
SyncOpBeginRenderPass sync_op(cmd, *this, pRenderPassBegin, pSubpassBeginInfo, cmd_name);
sync_op.Record(cb_context);
}
}
void SyncValidator::PostCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
subpass_begin_info.contents = contents;
RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, &subpass_begin_info, CMD_BEGINRENDERPASS);
}
void SyncValidator::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) {
StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2);
}
void SyncValidator::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo) {
StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, CMD_BEGINRENDERPASS2, kBeginRenderPass2KhrName);
}
bool SyncValidator::ValidateCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd, const char *cmd_name) const {
bool skip = false;
auto cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return skip;
SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo, cmd_name);
return sync_op.Validate(*cb_context);
}
bool SyncValidator::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
bool skip = StateTracker::PreCallValidateCmdNextSubpass(commandBuffer, contents);
// Convert to a NextSubpass2
auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
subpass_begin_info.contents = contents;
auto subpass_end_info = LvlInitStruct<VkSubpassEndInfo>();
skip |= ValidateCmdNextSubpass(commandBuffer, &subpass_begin_info, &subpass_end_info, CMD_NEXTSUBPASS);
return skip;
}
static const char *kNextSubpass2KhrName = "vkCmdNextSubpass2KHR";
bool SyncValidator::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) const {
bool skip = StateTracker::PreCallValidateCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2, kNextSubpass2KhrName);
return skip;
}
bool SyncValidator::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) const {
bool skip = StateTracker::PreCallValidateCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
skip |= ValidateCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
return skip;
}
void SyncValidator::RecordCmdNextSubpass(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd, const char *cmd_name) {
auto cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return;
SyncOpNextSubpass sync_op(cmd, *this, pSubpassBeginInfo, pSubpassEndInfo, cmd_name);
sync_op.Record(cb_context);
}
void SyncValidator::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
auto subpass_begin_info = LvlInitStruct<VkSubpassBeginInfo>();
subpass_begin_info.contents = contents;
RecordCmdNextSubpass(commandBuffer, &subpass_begin_info, nullptr, CMD_NEXTSUBPASS);
}
void SyncValidator::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2);
}
void SyncValidator::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpass(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, CMD_NEXTSUBPASS2, kNextSubpass2KhrName);
}
bool SyncValidator::ValidateCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd,
const char *cmd_name) const {
bool skip = false;
auto cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return skip;
SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo, cmd_name);
skip |= sync_op.Validate(*cb_context);
return skip;
}
bool SyncValidator::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
bool skip = StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer);
skip |= ValidateCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
return skip;
}
bool SyncValidator::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
bool skip = StateTracker::PreCallValidateCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
return skip;
}
static const char *kEndRenderPass2KhrName = "vkCmdEndRenderPass2KHR";
bool SyncValidator::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
const VkSubpassEndInfo *pSubpassEndInfo) const {
bool skip = StateTracker::PreCallValidateCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
skip |= ValidateCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2, kEndRenderPass2KhrName);
return skip;
}
void SyncValidator::RecordCmdEndRenderPass(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo, CMD_TYPE cmd,
const char *cmd_name) {
// Resolve all subpass contexts to the command buffer context
auto cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return;
SyncOpEndRenderPass sync_op(cmd, *this, pSubpassEndInfo, cmd_name);
sync_op.Record(cb_context);
}
// Simple heuristic rule to detect WAW operations representing algorithmically safe or incrementing
// updates to a resource which do not conflict at the byte level.
// TODO: Revisit this rule to see if it needs to be tighter or looser
// TODO: Add programmatic control over suppression heuristics
bool SyncValidator::SupressedBoundDescriptorWAW(const HazardResult &hazard) const {
return (hazard.hazard == WRITE_AFTER_WRITE) && (FlagBit(hazard.usage_index) == hazard.prior_access);
}
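// Example (illustrative): two back-to-back dispatches both performing SHADER_WRITE to the same bound
// storage buffer raise WRITE_AFTER_WRITE with the new usage equal to prior_access; this heuristic
// suppresses that self-WAW as a presumed-safe accumulation/update pattern.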
void SyncValidator::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
RecordCmdEndRenderPass(commandBuffer, nullptr, CMD_ENDRENDERPASS);
StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}
void SyncValidator::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2);
StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
}
void SyncValidator::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
RecordCmdEndRenderPass(commandBuffer, pSubpassEndInfo, CMD_ENDRENDERPASS2, kEndRenderPass2KhrName);
StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
}
template <typename BufferImageCopyRegionType>
bool SyncValidator::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyBufferToImage2KHR()" : "vkCmdCopyBufferToImage()";
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
const auto *dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto ©_region = pRegions[region];
HazardResult hazard;
if (dst_image) {
if (src_buffer) {
ResourceAccessRange src_range =
MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
hazard = context->DetectHazard(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, src_range);
if (hazard.hazard) {
// PHASE1 TODO -- add tag information to log msg when useful.
skip |= LogError(srcBuffer, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for srcBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcBuffer).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
}
hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, copy_region.imageSubresource,
copy_region.imageOffset, copy_region.imageExtent);
if (hazard.hazard) {
skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", func_name,
string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
if (skip) break;
}
if (skip) break;
}
return skip;
}
bool SyncValidator::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) const {
return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool SyncValidator::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
}
template <typename BufferImageCopyRegionType>
void SyncValidator::RecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) {
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const CMD_TYPE cmd_type = is_2khr ? CMD_COPYBUFFERTOIMAGE2KHR : CMD_COPYBUFFERTOIMAGE;
const auto tag = cb_access_context->NextCommandTag(cmd_type);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
const auto *src_buffer = Get<BUFFER_STATE>(srcBuffer);
const auto *dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto ©_region = pRegions[region];
if (dst_image) {
if (src_buffer) {
ResourceAccessRange src_range =
MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, dst_image->createInfo.format));
context->UpdateAccessState(*src_buffer, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment, src_range, tag);
}
context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
}
}
}
void SyncValidator::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
RecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, COPY_COMMAND_VERSION_1);
}
void SyncValidator::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) {
StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo);
RecordCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
}
template <typename BufferImageCopyRegionType>
bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount,
const BufferImageCopyRegionType *pRegions, CopyCommandVersion version) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyImageToBuffer2KHR()" : "vkCmdCopyImageToBuffer()";
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *src_image = Get<IMAGE_STATE>(srcImage);
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
for (uint32_t region = 0; region < regionCount; region++) {
const auto ©_region = pRegions[region];
if (src_image) {
auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, copy_region.imageSubresource,
copy_region.imageOffset, copy_region.imageExtent);
if (hazard.hazard) {
skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", func_name,
string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
if (dst_mem) {
ResourceAccessRange dst_range =
MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, dst_range);
if (hazard.hazard) {
skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for dstBuffer %s, region %" PRIu32 ". Access info %s.", func_name,
string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstBuffer).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
}
}
if (skip) break;
}
return skip;
}
bool SyncValidator::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount,
const VkBufferImageCopy *pRegions) const {
return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool SyncValidator::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
}
template <typename BufferImageCopyRegionType>
void SyncValidator::RecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const BufferImageCopyRegionType *pRegions,
CopyCommandVersion version) {
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGETOBUFFER2KHR : CMD_COPYIMAGETOBUFFER;
const auto tag = cb_access_context->NextCommandTag(cmd_type);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
const auto *src_image = Get<IMAGE_STATE>(srcImage);
auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->binding.mem_state->mem : VK_NULL_HANDLE;
const VulkanTypedHandle dst_handle(dst_mem, kVulkanObjectTypeDeviceMemory);
for (uint32_t region = 0; region < regionCount; region++) {
const auto ©_region = pRegions[region];
if (src_image) {
context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
copy_region.imageSubresource, copy_region.imageOffset, copy_region.imageExtent, tag);
if (dst_buffer) {
ResourceAccessRange dst_range =
MakeRange(copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->createInfo.format));
context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, dst_range, tag);
}
}
}
}
void SyncValidator::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
RecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, COPY_COMMAND_VERSION_1);
}
void SyncValidator::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
RecordCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
}
template <typename RegionType>
bool SyncValidator::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const RegionType *pRegions, VkFilter filter, const char *apiName) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *src_image = Get<IMAGE_STATE>(srcImage);
const auto *dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &blit_region = pRegions[region];
if (src_image) {
VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
auto hazard =
context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, blit_region.srcSubresource, offset, extent);
if (hazard.hazard) {
skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.", apiName,
string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
}
if (dst_image) {
VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
auto hazard =
context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, blit_region.dstSubresource, offset, extent);
if (hazard.hazard) {
skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.", apiName,
string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
if (skip) break;
}
}
return skip;
}
bool SyncValidator::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) const {
return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
"vkCmdBlitImage");
}
bool SyncValidator::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer,
const VkBlitImageInfo2KHR *pBlitImageInfo) const {
return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
pBlitImageInfo->filter, "vkCmdBlitImage2KHR");
}
template <typename RegionType>
void SyncValidator::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const RegionType *pRegions, VkFilter filter, ResourceUsageTag tag) {
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
auto *src_image = Get<IMAGE_STATE>(srcImage);
auto *dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &blit_region = pRegions[region];
if (src_image) {
VkOffset3D offset = {std::min(blit_region.srcOffsets[0].x, blit_region.srcOffsets[1].x),
std::min(blit_region.srcOffsets[0].y, blit_region.srcOffsets[1].y),
std::min(blit_region.srcOffsets[0].z, blit_region.srcOffsets[1].z)};
VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.srcOffsets[1].x - blit_region.srcOffsets[0].x)),
static_cast<uint32_t>(abs(blit_region.srcOffsets[1].y - blit_region.srcOffsets[0].y)),
static_cast<uint32_t>(abs(blit_region.srcOffsets[1].z - blit_region.srcOffsets[0].z))};
context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
blit_region.srcSubresource, offset, extent, tag);
}
if (dst_image) {
VkOffset3D offset = {std::min(blit_region.dstOffsets[0].x, blit_region.dstOffsets[1].x),
std::min(blit_region.dstOffsets[0].y, blit_region.dstOffsets[1].y),
std::min(blit_region.dstOffsets[0].z, blit_region.dstOffsets[1].z)};
VkExtent3D extent = {static_cast<uint32_t>(abs(blit_region.dstOffsets[1].x - blit_region.dstOffsets[0].x)),
static_cast<uint32_t>(abs(blit_region.dstOffsets[1].y - blit_region.dstOffsets[0].y)),
static_cast<uint32_t>(abs(blit_region.dstOffsets[1].z - blit_region.dstOffsets[0].z))};
context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
blit_region.dstSubresource, offset, extent, tag);
}
}
}
void SyncValidator::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) {
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE);
StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions, filter);
RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, tag);
}
void SyncValidator::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_BLITIMAGE2KHR);
RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
pBlitImageInfo->filter, tag);
}
bool SyncValidator::ValidateIndirectBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
VkCommandBuffer commandBuffer, const VkDeviceSize struct_size, const VkBuffer buffer,
const VkDeviceSize offset, const uint32_t drawCount, const uint32_t stride,
const char *function) const {
bool skip = false;
if (drawCount == 0) return skip;
const auto *buf_state = Get<BUFFER_STATE>(buffer);
VkDeviceSize size = struct_size;
if (drawCount == 1 || stride == size) {
if (drawCount > 1) size *= drawCount;
const ResourceAccessRange range = MakeRange(offset, size);
auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
if (hazard.hazard) {
skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
cb_context.FormatUsage(hazard).c_str());
}
} else {
for (uint32_t i = 0; i < drawCount; ++i) {
const ResourceAccessRange range = MakeRange(offset + i * stride, size);
auto hazard = context.DetectHazard(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
if (hazard.hazard) {
skip |= LogError(buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for indirect %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
cb_context.FormatUsage(hazard).c_str());
break;
}
}
}
return skip;
}
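// Range bookkeeping example: with struct_size == sizeof(VkDrawIndirectCommand) (16 bytes),
// drawCount == 3 and stride == 16, the records are contiguous and a single [offset, offset + 48)
// range is checked; with stride == 32 the padding between records is never accessed, so three
// separate 16-byte ranges are checked instead.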
void SyncValidator::RecordIndirectBuffer(AccessContext &context, const ResourceUsageTag &tag, const VkDeviceSize struct_size,
const VkBuffer buffer, const VkDeviceSize offset, const uint32_t drawCount,
uint32_t stride) {
const auto *buf_state = Get<BUFFER_STATE>(buffer);
VkDeviceSize size = struct_size;
if (drawCount == 1 || stride == size) {
if (drawCount > 1) size *= drawCount;
const ResourceAccessRange range = MakeRange(offset, size);
context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
} else {
for (uint32_t i = 0; i < drawCount; ++i) {
const ResourceAccessRange range = MakeRange(offset + i * stride, size);
context.UpdateAccessState(*buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range,
tag);
}
}
}
bool SyncValidator::ValidateCountBuffer(const CommandBufferAccessContext &cb_context, const AccessContext &context,
VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
const char *function) const {
bool skip = false;
const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
const ResourceAccessRange range = MakeRange(offset, 4);
auto hazard = context.DetectHazard(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, range);
if (hazard.hazard) {
skip |= LogError(count_buf_state->buffer, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for countBuffer %s in %s. Access info %s.", function, string_SyncHazard(hazard.hazard),
report_data->FormatHandle(buffer).c_str(), report_data->FormatHandle(commandBuffer).c_str(),
cb_context.FormatUsage(hazard).c_str());
}
return skip;
}
void SyncValidator::RecordCountBuffer(AccessContext &context, const ResourceUsageTag &tag, VkBuffer buffer, VkDeviceSize offset) {
const auto *count_buf_state = Get<BUFFER_STATE>(buffer);
const ResourceAccessRange range = MakeRange(offset, 4);
context.UpdateAccessState(*count_buf_state, SYNC_DRAW_INDIRECT_INDIRECT_COMMAND_READ, SyncOrdering::kNonAttachment, range, tag);
}
bool SyncValidator::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
return skip;
}
void SyncValidator::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
StateTracker::PreCallRecordCmdDispatch(commandBuffer, x, y, z);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCH);
cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
}
bool SyncValidator::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDispatchIndirectCommand), buffer, offset,
1, sizeof(VkDispatchIndirectCommand), "vkCmdDispatchIndirect");
return skip;
}
void SyncValidator::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
StateTracker::PreCallRecordCmdDispatchIndirect(commandBuffer, buffer, offset);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_DISPATCHINDIRECT);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_COMPUTE, tag);
RecordIndirectBuffer(*context, tag, sizeof(VkDispatchIndirectCommand), buffer, offset, 1, sizeof(VkDispatchIndirectCommand));
}
bool SyncValidator::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
uint32_t firstVertex, uint32_t firstInstance) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
skip |= cb_access_context->ValidateDrawVertex(vertexCount, firstVertex, "vkCmdDraw");
skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDraw");
return skip;
}
void SyncValidator::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
uint32_t firstVertex, uint32_t firstInstance) {
StateTracker::PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_DRAW);
cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
cb_access_context->RecordDrawVertex(vertexCount, firstVertex, tag);
cb_access_context->RecordDrawSubpassAttachment(tag);
}
bool SyncValidator::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
skip |= cb_access_context->ValidateDrawVertexIndex(indexCount, firstIndex, "vkCmdDrawIndexed");
skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexed");
return skip;
}
void SyncValidator::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
StateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXED);
cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
cb_access_context->RecordDrawVertexIndex(indexCount, firstIndex, tag);
cb_access_context->RecordDrawSubpassAttachment(tag);
}
bool SyncValidator::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
uint32_t drawCount, uint32_t stride) const {
bool skip = false;
if (drawCount == 0) return skip;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndirect");
skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
drawCount, stride, "vkCmdDrawIndirect");
    // TODO: For now, we validate the whole vertex buffer, which may cause false positives.
    // The VkDrawIndirectCommand buffer contents can be changed at any time before queue submission.
    // We will validate the vertex buffer at queue submit time in the future.
skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, "vkCmdDrawIndirect");
return skip;
}
void SyncValidator::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
uint32_t drawCount, uint32_t stride) {
StateTracker::PreCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
if (drawCount == 0) return;
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECT);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
cb_access_context->RecordDrawSubpassAttachment(tag);
RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, drawCount, stride);
    // TODO: For now, we record the whole vertex buffer, which may cause false positives.
    // The VkDrawIndirectCommand buffer contents can be changed at any time before queue submission.
    // We will record the vertex buffer at queue submit time in the future.
cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
}
bool SyncValidator::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
uint32_t drawCount, uint32_t stride) const {
bool skip = false;
if (drawCount == 0) return skip;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
skip |= cb_access_context->ValidateDrawSubpassAttachment("vkCmdDrawIndexedIndirect");
skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
offset, drawCount, stride, "vkCmdDrawIndexedIndirect");
    // TODO: For now, we validate the whole index and vertex buffer, which may cause false positives.
    // The VkDrawIndexedIndirectCommand buffer contents can be changed at any time before queue submission.
    // We will validate the index and vertex buffer at queue submit time in the future.
skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, "vkCmdDrawIndexedIndirect");
return skip;
}
void SyncValidator::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
uint32_t drawCount, uint32_t stride) {
StateTracker::PreCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECT);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
cb_access_context->RecordDrawSubpassAttachment(tag);
RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, drawCount, stride);
    // TODO: For now, we record the whole index and vertex buffer, which may cause false positives.
    // The VkDrawIndexedIndirectCommand buffer contents can be changed at any time before queue submission.
    // We will record the index and vertex buffer at queue submit time in the future.
cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
}
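// Shared validation for vkCmdDrawIndirectCount and its KHR/AMD aliases. The actual draw count lives in countBuffer
// and is unknown at record time, so maxDrawCount is used to bound the indirect parameter buffer validation.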
bool SyncValidator::ValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
uint32_t stride, const char *function) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndirectCommand), buffer, offset,
maxDrawCount, stride, function);
skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
    // TODO: For now, we validate the whole vertex buffer, which may cause false positives.
    // The VkDrawIndirectCommand buffer contents can be changed at any time before queue submission.
    // We will validate the vertex buffer at queue submit time in the future.
skip |= cb_access_context->ValidateDrawVertex(UINT32_MAX, 0, function);
return skip;
}
bool SyncValidator::PreCallValidateCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
uint32_t stride) const {
return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
"vkCmdDrawIndirectCount");
}
void SyncValidator::PreCallRecordCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
uint32_t stride) {
StateTracker::PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
stride);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDIRECTCOUNT);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
cb_access_context->RecordDrawSubpassAttachment(tag);
RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndirectCommand), buffer, offset, 1, stride);
RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
    // TODO: For now, we record the whole vertex buffer, which may cause false positives.
    // The VkDrawIndirectCommand buffer contents can be changed at any time before queue submission.
    // We will record the vertex buffer at queue submit time in the future.
cb_access_context->RecordDrawVertex(UINT32_MAX, 0, tag);
}
bool SyncValidator::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) const {
return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
"vkCmdDrawIndirectCountKHR");
}
void SyncValidator::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) {
StateTracker::PreCallRecordCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
stride);
PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
}
bool SyncValidator::PreCallValidateCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) const {
return ValidateCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
"vkCmdDrawIndirectCountAMD");
}
void SyncValidator::PreCallRecordCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) {
StateTracker::PreCallRecordCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount,
stride);
PreCallRecordCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
}
bool SyncValidator::ValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
uint32_t stride, const char *function) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
skip |= cb_access_context->ValidateDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, function);
skip |= cb_access_context->ValidateDrawSubpassAttachment(function);
skip |= ValidateIndirectBuffer(*cb_access_context, *context, commandBuffer, sizeof(VkDrawIndexedIndirectCommand), buffer,
offset, maxDrawCount, stride, function);
skip |= ValidateCountBuffer(*cb_access_context, *context, commandBuffer, countBuffer, countBufferOffset, function);
    // TODO: For now, we validate the whole index and vertex buffer, which may cause false positives.
    // The VkDrawIndexedIndirectCommand buffer contents can be changed at any time before queue submission.
    // We will validate the index and vertex buffer at queue submit time in the future.
skip |= cb_access_context->ValidateDrawVertexIndex(UINT32_MAX, 0, function);
return skip;
}
bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) const {
return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
"vkCmdDrawIndexedIndirectCount");
}
void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) {
StateTracker::PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
maxDrawCount, stride);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_DRAWINDEXEDINDIRECTCOUNT);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
cb_access_context->RecordDispatchDrawDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, tag);
cb_access_context->RecordDrawSubpassAttachment(tag);
RecordIndirectBuffer(*context, tag, sizeof(VkDrawIndexedIndirectCommand), buffer, offset, 1, stride);
RecordCountBuffer(*context, tag, countBuffer, countBufferOffset);
    // TODO: For now, we record the whole index and vertex buffer, which may cause false positives.
    // The VkDrawIndexedIndirectCommand buffer contents can be changed at any time before queue submission.
    // We will record the index and vertex buffer at queue submit time in the future.
cb_access_context->RecordDrawVertexIndex(UINT32_MAX, 0, tag);
}
bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer,
VkDeviceSize offset, VkBuffer countBuffer,
VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
uint32_t stride) const {
return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
"vkCmdDrawIndexedIndirectCountKHR");
}
void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) {
StateTracker::PreCallRecordCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
maxDrawCount, stride);
PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
}
bool SyncValidator::PreCallValidateCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer,
VkDeviceSize offset, VkBuffer countBuffer,
VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
uint32_t stride) const {
return ValidateCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride,
"vkCmdDrawIndexedIndirectCountAMD");
}
void SyncValidator::PreCallRecordCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) {
StateTracker::PreCallRecordCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
maxDrawCount, stride);
PreCallRecordCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
}
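// Image clears are modeled as transfer-stage writes (SYNC_TRANSFER_TRANSFER_WRITE) covering each requested
// subresource range over the full image extent.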
bool SyncValidator::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *image_state = Get<IMAGE_STATE>(image);
for (uint32_t index = 0; index < rangeCount; index++) {
const auto &range = pRanges[index];
if (image_state) {
auto hazard =
context->DetectHazard(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
if (hazard.hazard) {
skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
"vkCmdClearColorImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
cb_access_context->FormatUsage(hazard).c_str());
}
}
}
return skip;
}
void SyncValidator::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_CLEARCOLORIMAGE);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
const auto *image_state = Get<IMAGE_STATE>(image);
for (uint32_t index = 0; index < rangeCount; index++) {
const auto &range = pRanges[index];
if (image_state) {
context->UpdateAccessState(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
image_state->createInfo.extent, tag);
}
}
}
bool SyncValidator::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image,
VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *image_state = Get<IMAGE_STATE>(image);
for (uint32_t index = 0; index < rangeCount; index++) {
const auto &range = pRanges[index];
if (image_state) {
auto hazard =
context->DetectHazard(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, range, {0, 0, 0}, image_state->createInfo.extent);
if (hazard.hazard) {
skip |= LogError(image, string_SyncHazardVUID(hazard.hazard),
"vkCmdClearDepthStencilImage: Hazard %s for %s, range index %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(image).c_str(), index,
cb_access_context->FormatUsage(hazard).c_str());
}
}
}
return skip;
}
void SyncValidator::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_CLEARDEPTHSTENCILIMAGE);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
const auto *image_state = Get<IMAGE_STATE>(image);
for (uint32_t index = 0; index < rangeCount; index++) {
const auto &range = pRanges[index];
if (image_state) {
context->UpdateAccessState(*image_state, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, {0, 0, 0},
image_state->createInfo.extent, tag);
}
}
}
bool SyncValidator::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
VkDeviceSize dstOffset, VkDeviceSize stride,
VkQueryResultFlags flags) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
if (hazard.hazard) {
skip |=
LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
"vkCmdCopyQueryPoolResults: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
}
}
    // TODO: Track VkQueryPool
return skip;
}
void SyncValidator::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) {
StateTracker::PreCallRecordCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
stride, flags);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_COPYQUERYPOOLRESULTS);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, stride * queryCount);
context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
}
    // TODO: Track VkQueryPool
}
bool SyncValidator::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize size, uint32_t data) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
if (hazard.hazard) {
skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
"vkCmdFillBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
}
}
return skip;
}
void SyncValidator::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize size, uint32_t data) {
StateTracker::PreCallRecordCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_FILLBUFFER);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(*dst_buffer, dstOffset, size);
context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
}
}
bool SyncValidator::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageResolve *pRegions) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *src_image = Get<IMAGE_STATE>(srcImage);
const auto *dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &resolve_region = pRegions[region];
if (src_image) {
auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, resolve_region.srcSubresource,
resolve_region.srcOffset, resolve_region.extent);
if (hazard.hazard) {
skip |= LogError(srcImage, string_SyncHazardVUID(hazard.hazard),
"vkCmdResolveImage: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(srcImage).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
}
if (dst_image) {
auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, resolve_region.dstSubresource,
resolve_region.dstOffset, resolve_region.extent);
if (hazard.hazard) {
skip |= LogError(dstImage, string_SyncHazardVUID(hazard.hazard),
"vkCmdResolveImage: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(dstImage).c_str(), region,
cb_access_context->FormatUsage(hazard).c_str());
}
if (skip) break;
}
}
return skip;
}
void SyncValidator::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageResolve *pRegions) {
StateTracker::PreCallRecordCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
auto *src_image = Get<IMAGE_STATE>(srcImage);
auto *dst_image = Get<IMAGE_STATE>(dstImage);
for (uint32_t region = 0; region < regionCount; region++) {
const auto &resolve_region = pRegions[region];
if (src_image) {
context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
}
if (dst_image) {
context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
}
}
}
bool SyncValidator::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
const VkResolveImageInfo2KHR *pResolveImageInfo) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
const auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
const auto &resolve_region = pResolveImageInfo->pRegions[region];
if (src_image) {
auto hazard = context->DetectHazard(*src_image, SYNC_TRANSFER_TRANSFER_READ, resolve_region.srcSubresource,
resolve_region.srcOffset, resolve_region.extent);
if (hazard.hazard) {
skip |= LogError(pResolveImageInfo->srcImage, string_SyncHazardVUID(hazard.hazard),
"vkCmdResolveImage2KHR: Hazard %s for srcImage %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->srcImage).c_str(),
region, cb_access_context->FormatUsage(hazard).c_str());
}
}
if (dst_image) {
auto hazard = context->DetectHazard(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, resolve_region.dstSubresource,
resolve_region.dstOffset, resolve_region.extent);
if (hazard.hazard) {
skip |= LogError(pResolveImageInfo->dstImage, string_SyncHazardVUID(hazard.hazard),
"vkCmdResolveImage2KHR: Hazard %s for dstImage %s, region %" PRIu32 ". Access info %s.",
string_SyncHazard(hazard.hazard), report_data->FormatHandle(pResolveImageInfo->dstImage).c_str(),
region, cb_access_context->FormatUsage(hazard).c_str());
}
if (skip) break;
}
}
return skip;
}
void SyncValidator::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
const VkResolveImageInfo2KHR *pResolveImageInfo) {
StateTracker::PreCallRecordCmdResolveImage2KHR(commandBuffer, pResolveImageInfo);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_RESOLVEIMAGE2KHR);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
auto *src_image = Get<IMAGE_STATE>(pResolveImageInfo->srcImage);
auto *dst_image = Get<IMAGE_STATE>(pResolveImageInfo->dstImage);
for (uint32_t region = 0; region < pResolveImageInfo->regionCount; region++) {
const auto &resolve_region = pResolveImageInfo->pRegions[region];
if (src_image) {
context->UpdateAccessState(*src_image, SYNC_TRANSFER_TRANSFER_READ, SyncOrdering::kNonAttachment,
resolve_region.srcSubresource, resolve_region.srcOffset, resolve_region.extent, tag);
}
if (dst_image) {
context->UpdateAccessState(*dst_image, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment,
resolve_region.dstSubresource, resolve_region.dstOffset, resolve_region.extent, tag);
}
}
}
bool SyncValidator::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize dataSize, const void *pData) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
// VK_WHOLE_SIZE not allowed
const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
if (hazard.hazard) {
skip |= LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
"vkCmdUpdateBuffer: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
}
}
return skip;
}
void SyncValidator::PreCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize dataSize, const void *pData) {
StateTracker::PreCallRecordCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_UPDATEBUFFER);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
// VK_WHOLE_SIZE not allowed
const ResourceAccessRange range = MakeRange(dstOffset, dataSize);
context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
}
}
bool SyncValidator::PreCallValidateCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) const {
bool skip = false;
const auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
if (!cb_access_context) return skip;
const auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
if (!context) return skip;
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, 4);
auto hazard = context->DetectHazard(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, range);
if (hazard.hazard) {
skip |=
LogError(dstBuffer, string_SyncHazardVUID(hazard.hazard),
"vkCmdWriteBufferMarkerAMD: Hazard %s for dstBuffer %s. Access info %s.", string_SyncHazard(hazard.hazard),
report_data->FormatHandle(dstBuffer).c_str(), cb_access_context->FormatUsage(hazard).c_str());
}
}
return skip;
}
void SyncValidator::PreCallRecordCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) {
StateTracker::PreCallRecordCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
auto *cb_access_context = GetAccessContext(commandBuffer);
assert(cb_access_context);
const auto tag = cb_access_context->NextCommandTag(CMD_WRITEBUFFERMARKERAMD);
auto *context = cb_access_context->GetCurrentAccessContext();
assert(context);
const auto *dst_buffer = Get<BUFFER_STATE>(dstBuffer);
if (dst_buffer) {
const ResourceAccessRange range = MakeRange(dstOffset, 4);
context->UpdateAccessState(*dst_buffer, SYNC_TRANSFER_TRANSFER_WRITE, SyncOrdering::kNonAttachment, range, tag);
}
}
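// The event commands below are expressed as SyncOp objects so that validation and recording share a single,
// dehandled translation of the API parameters.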
bool SyncValidator::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
bool skip = false;
const auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return skip;
SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
return set_event_op.Validate(*cb_context);
}
void SyncValidator::PostCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
StateTracker::PostCallRecordCmdSetEvent(commandBuffer, event, stageMask);
auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return;
SyncOpSetEvent set_event_op(CMD_SETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
set_event_op.Record(cb_context);
}
bool SyncValidator::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event,
VkPipelineStageFlags stageMask) const {
bool skip = false;
const auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return skip;
SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
return reset_event_op.Validate(*cb_context);
}
void SyncValidator::PostCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
StateTracker::PostCallRecordCmdResetEvent(commandBuffer, event, stageMask);
auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return;
SyncOpResetEvent reset_event_op(CMD_RESETEVENT, *this, cb_context->GetQueueFlags(), event, stageMask);
reset_event_op.Record(cb_context);
}
bool SyncValidator::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
bool skip = false;
const auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return skip;
SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return wait_events_op.Validate(*cb_context);
}
void SyncValidator::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
StateTracker::PostCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
auto *cb_context = GetAccessContext(commandBuffer);
assert(cb_context);
if (!cb_context) return;
SyncOpWaitEvents wait_events_op(CMD_WAITEVENTS, *this, cb_context->GetQueueFlags(), eventCount, pEvents, srcStageMask,
dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    wait_events_op.Record(cb_context);
}
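// Clear the recorded first-scope access maps for every address type and reset the event's exec scope.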
void SyncEventState::ResetFirstScope() {
for (const auto address_type : kAddressTypes) {
first_scope[static_cast<size_t>(address_type)].clear();
}
scope = SyncExecScope();
}
// Keep the "ignore this event" logic in the same place for ValidateWait and RecordWait to use
SyncEventState::IgnoreReason SyncEventState::IsIgnoredByWait(VkPipelineStageFlags srcStageMask) const {
IgnoreReason reason = NotIgnored;
if (last_command == CMD_RESETEVENT && !HasBarrier(0U, 0U)) {
reason = ResetWaitRace;
} else if (unsynchronized_set) {
reason = SetRace;
} else {
const VkPipelineStageFlags missing_bits = scope.mask_param & ~srcStageMask;
if (missing_bits) reason = MissingStageBits;
}
return reason;
}
bool SyncEventState::HasBarrier(VkPipelineStageFlags stageMask, VkPipelineStageFlags exec_scope_arg) const {
bool has_barrier = (last_command == CMD_NONE) || (stageMask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(barriers & exec_scope_arg) || (barriers & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
return has_barrier;
}
SyncOpBarriers::SyncOpBarriers(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers)
: SyncOpBase(cmd),
dependency_flags_(dependencyFlags),
src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, srcStageMask)),
dst_exec_scope_(SyncExecScope::MakeDst(queue_flags, dstStageMask)) {
// Translate the API parameters into structures SyncVal understands directly, and dehandle for safer/faster replay.
MakeMemoryBarriers(src_exec_scope_, dst_exec_scope_, dependencyFlags, memoryBarrierCount, pMemoryBarriers);
MakeBufferMemoryBarriers(sync_state, src_exec_scope_, dst_exec_scope_, dependencyFlags, bufferMemoryBarrierCount,
pBufferMemoryBarriers);
MakeImageMemoryBarriers(sync_state, src_exec_scope_, dst_exec_scope_, dependencyFlags, imageMemoryBarrierCount,
pImageMemoryBarriers);
}
SyncOpPipelineBarrier::SyncOpPipelineBarrier(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags,
VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers)
: SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers) {}
bool SyncOpPipelineBarrier::Validate(const CommandBufferAccessContext &cb_context) const {
bool skip = false;
const auto *context = cb_context.GetCurrentAccessContext();
assert(context);
if (!context) return skip;
// Validate Image Layout transitions
for (const auto &image_barrier : image_memory_barriers_) {
if (image_barrier.new_layout == image_barrier.old_layout) continue; // Only interested in layout transitions at this point.
const auto *image_state = image_barrier.image.get();
if (!image_state) continue;
const auto hazard = context->DetectImageBarrierHazard(image_barrier);
if (hazard.hazard) {
// PHASE1 TODO -- add tag information to log msg when useful.
const auto &sync_state = cb_context.GetSyncState();
const auto image_handle = image_state->image;
skip |= sync_state.LogError(image_handle, string_SyncHazardVUID(hazard.hazard),
"vkCmdPipelineBarrier: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.",
string_SyncHazard(hazard.hazard), image_barrier.index,
sync_state.report_data->FormatHandle(image_handle).c_str(),
cb_context.FormatUsage(hazard).c_str());
}
}
return skip;
}
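// Functor/range-generator factory consumed by SyncOpBarriers::ApplyBarriers and ApplyGlobalBarriers for the
// pipeline barrier case: plain resource ranges with PipelineBarrierOp, and global barriers applied (and resolved)
// over the full address range.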
struct SyncOpPipelineBarrierFunctorFactory {
using BarrierOpFunctor = PipelineBarrierOp;
using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
using GlobalBarrierOpFunctor = PipelineBarrierOp;
using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
using BufferRange = ResourceAccessRange;
using ImageRange = subresource_adapter::ImageRangeGenerator;
using GlobalRange = ResourceAccessRange;
ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier, bool layout_transition) const {
return ApplyFunctor(BarrierOpFunctor(barrier, layout_transition));
}
GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, const ResourceUsageTag &tag) const {
return GlobalApplyFunctor(true /* resolve */, size_hint, tag);
}
GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier) const {
return GlobalBarrierOpFunctor(barrier, false);
}
BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range) const {
if (!SimpleBinding(buffer)) return ResourceAccessRange();
const auto base_address = ResourceBaseAddress(buffer);
return (range + base_address);
}
ImageRange MakeRangeGen(const IMAGE_STATE &image, const SyncImageMemoryBarrier::SubImageRange &range) const {
if (!SimpleBinding(image)) return subresource_adapter::ImageRangeGenerator();
const auto base_address = ResourceBaseAddress(image);
subresource_adapter::ImageRangeGenerator range_gen(*image.fragment_encoder.get(), range.subresource_range, range.offset,
range.extent, base_address);
return range_gen;
}
GlobalRange MakeGlobalRangeGen(AccessAddressType) const { return kFullRange; }
};
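// Apply the per-resource barriers one by one; each barrier only touches the ranges its factory-built generator
// yields for that resource.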
template <typename Barriers, typename FunctorFactory>
void SyncOpBarriers::ApplyBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag &tag,
AccessContext *context) {
for (const auto &barrier : barriers) {
const auto *state = barrier.GetState();
if (state) {
auto *const accesses = &context->GetAccessStateMap(GetAccessAddressType(*state));
auto update_action = factory.MakeApplyFunctor(barrier.barrier, barrier.IsLayoutTransition());
auto range_gen = factory.MakeRangeGen(*state, barrier.Range());
UpdateMemoryAccessState(accesses, update_action, &range_gen);
}
}
}
template <typename Barriers, typename FunctorFactory>
void SyncOpBarriers::ApplyGlobalBarriers(const Barriers &barriers, const FunctorFactory &factory, const ResourceUsageTag &tag,
AccessContext *access_context) {
auto barriers_functor = factory.MakeGlobalApplyFunctor(barriers.size(), tag);
for (const auto &barrier : barriers) {
barriers_functor.EmplaceBack(factory.MakeGlobalBarrierOpFunctor(barrier));
}
for (const auto address_type : kAddressTypes) {
auto range_gen = factory.MakeGlobalRangeGen(address_type);
UpdateMemoryAccessState(&(access_context->GetAccessStateMap(address_type)), barriers_functor, &range_gen);
}
}
void SyncOpPipelineBarrier::Record(CommandBufferAccessContext *cb_context) const {
SyncOpPipelineBarrierFunctorFactory factory;
auto *access_context = cb_context->GetCurrentAccessContext();
const auto tag = cb_context->NextCommandTag(cmd_);
ApplyBarriers(buffer_memory_barriers_, factory, tag, access_context);
ApplyBarriers(image_memory_barriers_, factory, tag, access_context);
ApplyGlobalBarriers(memory_barriers_, factory, tag, access_context);
cb_context->ApplyGlobalBarriersToEvents(src_exec_scope_, dst_exec_scope_);
}
void SyncOpBarriers::MakeMemoryBarriers(const SyncExecScope &src, const SyncExecScope &dst, VkDependencyFlags dependency_flags,
uint32_t memory_barrier_count, const VkMemoryBarrier *memory_barriers) {
    memory_barriers_.reserve(std::max<uint32_t>(1, memory_barrier_count));
for (uint32_t barrier_index = 0; barrier_index < memory_barrier_count; barrier_index++) {
const auto &barrier = memory_barriers[barrier_index];
SyncBarrier sync_barrier(barrier, src, dst);
memory_barriers_.emplace_back(sync_barrier);
}
if (0 == memory_barrier_count) {
// If there are no global memory barriers, force an exec barrier
memory_barriers_.emplace_back(SyncBarrier(src, dst));
}
}
void SyncOpBarriers::MakeBufferMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src, const SyncExecScope &dst,
VkDependencyFlags dependencyFlags, uint32_t barrier_count,
const VkBufferMemoryBarrier *barriers) {
buffer_memory_barriers_.reserve(barrier_count);
for (uint32_t index = 0; index < barrier_count; index++) {
const auto &barrier = barriers[index];
auto buffer = sync_state.GetShared<BUFFER_STATE>(barrier.buffer);
if (buffer) {
const auto barrier_size = GetBufferWholeSize(*buffer, barrier.offset, barrier.size);
const auto range = MakeRange(barrier.offset, barrier_size);
const SyncBarrier sync_barrier(barrier, src, dst);
buffer_memory_barriers_.emplace_back(buffer, sync_barrier, range);
} else {
buffer_memory_barriers_.emplace_back();
}
}
}
void SyncOpBarriers::MakeImageMemoryBarriers(const SyncValidator &sync_state, const SyncExecScope &src, const SyncExecScope &dst,
VkDependencyFlags dependencyFlags, uint32_t barrier_count,
const VkImageMemoryBarrier *barriers) {
image_memory_barriers_.reserve(barrier_count);
for (uint32_t index = 0; index < barrier_count; index++) {
const auto &barrier = barriers[index];
const auto image = sync_state.GetShared<IMAGE_STATE>(barrier.image);
if (image) {
auto subresource_range = NormalizeSubresourceRange(image->createInfo, barrier.subresourceRange);
const SyncBarrier sync_barrier(barrier, src, dst);
image_memory_barriers_.emplace_back(image, index, sync_barrier, barrier.oldLayout, barrier.newLayout,
subresource_range);
} else {
image_memory_barriers_.emplace_back();
image_memory_barriers_.back().index = index; // Just in case we're interested in the ones we skipped.
}
}
}
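// Note: vkCmdWaitEvents carries no dependency flags, so the barrier translation below runs with VkDependencyFlags(0).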
SyncOpWaitEvents::SyncOpWaitEvents(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, uint32_t eventCount,
const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
: SyncOpBarriers(cmd, sync_state, queue_flags, srcStageMask, dstStageMask, VkDependencyFlags(0U), memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers) {
MakeEventsList(sync_state, eventCount, pEvents);
}
bool SyncOpWaitEvents::Validate(const CommandBufferAccessContext &cb_context) const {
const char *const ignored = "Wait operation is ignored for this event.";
bool skip = false;
const auto &sync_state = cb_context.GetSyncState();
const auto command_buffer_handle = cb_context.GetCBState().commandBuffer;
if (src_exec_scope_.mask_param & VK_PIPELINE_STAGE_HOST_BIT) {
const char *const vuid = "SYNC-vkCmdWaitEvents-hostevent-unsupported";
        skip = sync_state.LogInfo(command_buffer_handle, vuid,
                                  "%s, srcStageMask includes %s, unsupported by synchronization validation. %s", CmdName(),
                                  string_VkPipelineStageFlagBits(VK_PIPELINE_STAGE_HOST_BIT), ignored);
}
VkPipelineStageFlags event_stage_masks = 0U;
bool events_not_found = false;
const auto *events_context = cb_context.GetCurrentEventsContext();
assert(events_context);
for (const auto &sync_event_pair : *events_context) {
const auto *sync_event = sync_event_pair.second.get();
if (!sync_event) {
// NOTE PHASE2: This is where we'll need queue submit time validation to come back and check the srcStageMask bits
// or solve this with replay creating the SyncEventState in the queue context... also this will be a
// new validation error... wait without previously submitted set event...
events_not_found = true; // Demote "extra_stage_bits" error to warning, to avoid false positives at *record time*
continue; // Core, Lifetimes, or Param check needs to catch invalid events.
}
const auto event = sync_event->event->event;
// TODO add "destroyed" checks
event_stage_masks |= sync_event->scope.mask_param;
const auto ignore_reason = sync_event->IsIgnoredByWait(src_exec_scope_.mask_param);
if (ignore_reason) {
switch (ignore_reason) {
case SyncEventState::ResetWaitRace: {
const char *const vuid = "SYNC-vkCmdWaitEvents-missingbarrier-reset";
const char *const message =
"%s: %s %s operation following %s without intervening execution barrier, may cause race condition. %s";
skip |=
sync_state.LogError(event, vuid, message, CmdName(), sync_state.report_data->FormatHandle(event).c_str(),
CmdName(), CommandTypeString(sync_event->last_command), ignored);
break;
}
case SyncEventState::SetRace: {
                // Issue an error message that Wait is waiting on a signal subject to a race condition, and is thus
                // ignored for this event
const char *const vuid = "SYNC-vkCmdWaitEvents-unsynchronized-setops";
const char *const message =
"%s: %s Unsychronized %s calls result in race conditions w.r.t. event signalling, %s %s";
const char *const reason = "First synchronization scope is undefined.";
skip |=
sync_state.LogError(event, vuid, message, CmdName(), sync_state.report_data->FormatHandle(event).c_str(),
CommandTypeString(sync_event->last_command), reason, ignored);
break;
}
case SyncEventState::MissingStageBits: {
const VkPipelineStageFlags missing_bits = sync_event->scope.mask_param & ~src_exec_scope_.mask_param;
// Issue error message that event waited for is not in wait events scope
const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
const char *const message =
"%s: %s stageMask 0x%" PRIx32 " includes bits not present in srcStageMask 0x%" PRIx32
". Bits missing from srcStageMask %s. %s";
skip |=
sync_state.LogError(event, vuid, message, CmdName(), sync_state.report_data->FormatHandle(event).c_str(),
sync_event->scope.mask_param, src_exec_scope_.mask_param,
string_VkPipelineStageFlags(missing_bits).c_str(), ignored);
break;
}
default:
assert(ignore_reason == SyncEventState::NotIgnored);
}
} else if (image_memory_barriers_.size()) {
const auto *context = cb_context.GetCurrentAccessContext();
assert(context);
for (const auto &image_memory_barrier : image_memory_barriers_) {
if (image_memory_barrier.old_layout == image_memory_barrier.new_layout) continue;
const auto *image_state = image_memory_barrier.image.get();
if (!image_state) continue;
const auto &subresource_range = image_memory_barrier.range.subresource_range;
const auto &src_access_scope = image_memory_barrier.barrier.src_access_scope;
const auto hazard =
context->DetectImageBarrierHazard(*image_state, sync_event->scope.exec_scope, src_access_scope,
subresource_range, *sync_event, AccessContext::DetectOptions::kDetectAll);
if (hazard.hazard) {
skip |= sync_state.LogError(image_state->image, string_SyncHazardVUID(hazard.hazard),
"%s: Hazard %s for image barrier %" PRIu32 " %s. Access info %s.", CmdName(),
string_SyncHazard(hazard.hazard), image_memory_barrier.index,
sync_state.report_data->FormatHandle(image_state->image).c_str(),
cb_context.FormatUsage(hazard).c_str());
break;
}
}
}
}
// Note that we can't check for HOST in pEvents as we don't track that set event type
const auto extra_stage_bits = (src_exec_scope_.mask_param & ~VK_PIPELINE_STAGE_HOST_BIT) & ~event_stage_masks;
if (extra_stage_bits) {
// Issue error message that event waited for is not in wait events scope
const char *const vuid = "VUID-vkCmdWaitEvents-srcStageMask-01158";
const char *const message =
"%s: srcStageMask 0x%" PRIx32 " contains stages not present in pEvents stageMask. Extra stages are %s.%s";
if (events_not_found) {
skip |= sync_state.LogInfo(command_buffer_handle, vuid, message, CmdName(), src_exec_scope_.mask_param,
string_VkPipelineStageFlags(extra_stage_bits).c_str(),
" vkCmdSetEvent may be in previously submitted command buffer.");
} else {
skip |= sync_state.LogError(command_buffer_handle, vuid, message, CmdName(), src_exec_scope_.mask_param,
string_VkPipelineStageFlags(extra_stage_bits).c_str(), "");
}
}
return skip;
}
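// Wait-events variant of the functor factory: every barrier is first restricted to the event's first scope
// (RestrictToEvent), and all range generators are filtered against the first-scope access maps, so only accesses
// the set-event actually covered are affected.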
struct SyncOpWaitEventsFunctorFactory {
using BarrierOpFunctor = WaitEventBarrierOp;
using ApplyFunctor = ApplyBarrierFunctor<BarrierOpFunctor>;
using GlobalBarrierOpFunctor = WaitEventBarrierOp;
using GlobalApplyFunctor = ApplyBarrierOpsFunctor<GlobalBarrierOpFunctor>;
using BufferRange = EventSimpleRangeGenerator;
using ImageRange = EventImageRangeGenerator;
using GlobalRange = EventSimpleRangeGenerator;
// Need to restrict to only valid exec and access scope for this event
// Pass by value is intentional to get a copy we can change without modifying the passed barrier
SyncBarrier RestrictToEvent(SyncBarrier barrier) const {
barrier.src_exec_scope = sync_event->scope.exec_scope & barrier.src_exec_scope;
barrier.src_access_scope = sync_event->scope.valid_accesses & barrier.src_access_scope;
return barrier;
}
ApplyFunctor MakeApplyFunctor(const SyncBarrier &barrier_arg, bool layout_transition) const {
auto barrier = RestrictToEvent(barrier_arg);
return ApplyFunctor(BarrierOpFunctor(sync_event->first_scope_tag, barrier, layout_transition));
}
GlobalApplyFunctor MakeGlobalApplyFunctor(size_t size_hint, const ResourceUsageTag &tag) const {
return GlobalApplyFunctor(false /* don't resolve */, size_hint, tag);
}
GlobalBarrierOpFunctor MakeGlobalBarrierOpFunctor(const SyncBarrier &barrier_arg) const {
auto barrier = RestrictToEvent(barrier_arg);
return GlobalBarrierOpFunctor(sync_event->first_scope_tag, barrier, false);
}
BufferRange MakeRangeGen(const BUFFER_STATE &buffer, const ResourceAccessRange &range_arg) const {
const AccessAddressType address_type = GetAccessAddressType(buffer);
const auto base_address = ResourceBaseAddress(buffer);
ResourceAccessRange range = SimpleBinding(buffer) ? (range_arg + base_address) : ResourceAccessRange();
EventSimpleRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), range);
return filtered_range_gen;
}
ImageRange MakeRangeGen(const IMAGE_STATE &image, const SyncImageMemoryBarrier::SubImageRange &range) const {
if (!SimpleBinding(image)) return ImageRange();
const auto address_type = GetAccessAddressType(image);
const auto base_address = ResourceBaseAddress(image);
subresource_adapter::ImageRangeGenerator image_range_gen(*image.fragment_encoder.get(), range.subresource_range,
range.offset, range.extent, base_address);
EventImageRangeGenerator filtered_range_gen(sync_event->FirstScope(address_type), image_range_gen);
return filtered_range_gen;
}
GlobalRange MakeGlobalRangeGen(AccessAddressType address_type) const {
return EventSimpleRangeGenerator(sync_event->FirstScope(address_type), kFullRange);
}
SyncOpWaitEventsFunctorFactory(SyncEventState *sync_event_) : sync_event(sync_event_) { assert(sync_event); }
SyncEventState *sync_event;
};
void SyncOpWaitEvents::Record(CommandBufferAccessContext *cb_context) const {
const auto tag = cb_context->NextCommandTag(cmd_);
auto *access_context = cb_context->GetCurrentAccessContext();
assert(access_context);
if (!access_context) return;
auto *events_context = cb_context->GetCurrentEventsContext();
assert(events_context);
if (!events_context) return;
// Unlike PipelineBarrier, WaitEvent is *not* limited to accesses within the current subpass (if any) and thus needs to import
// all accesses. We could instead import for all first_scopes, or a union of them, if this becomes a performance/memory issue,
// but since we have no idea of the performance of the union, nor whether it even matters, we take the simplest approach here.
access_context->ResolvePreviousAccesses();
const auto &dst = dst_exec_scope_;
// TODO... this needs to change the SyncEventContext it's using depending on whether this is replay... the recorded
// sync_event will be in the recorded context, but we need to update the sync_events in the current context....
for (auto &event_shared : events_) {
if (!event_shared.get()) continue;
auto *sync_event = events_context->GetFromShared(event_shared);
sync_event->last_command = CMD_WAITEVENTS;
if (!sync_event->IsIgnoredByWait(src_exec_scope_.mask_param)) {
// These apply barriers one at a time as they are restricted to the resource ranges specified for each barrier,
// but do not update the dependency chain information (they set the "pending" state instead), so that the order
// independence of the barriers is maintained.
SyncOpWaitEventsFunctorFactory factory(sync_event);
ApplyBarriers(buffer_memory_barriers_, factory, tag, access_context);
ApplyBarriers(image_memory_barriers_, factory, tag, access_context);
ApplyGlobalBarriers(memory_barriers_, factory, tag, access_context);
// Apply the global barrier to the event itself (for race condition tracking)
// Events don't happen at a stage, so we need to store the unexpanded ALL_COMMANDS bit, if set, for inter-event calls
sync_event->barriers = dst.mask_param & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
sync_event->barriers |= dst.exec_scope;
} else {
// We ignored this wait, so we don't have any effective synchronization barriers for it.
sync_event->barriers = 0U;
}
}
// Apply the pending barriers
ResolvePendingBarrierFunctor apply_pending_action(tag);
access_context->ApplyToContext(apply_pending_action);
}
void SyncOpWaitEvents::MakeEventsList(const SyncValidator &sync_state, uint32_t event_count, const VkEvent *events) {
events_.reserve(event_count);
for (uint32_t event_index = 0; event_index < event_count; event_index++) {
events_.emplace_back(sync_state.GetShared<EVENT_STATE>(events[event_index]));
}
}
SyncOpResetEvent::SyncOpResetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
VkPipelineStageFlags stageMask)
: SyncOpBase(cmd),
event_(sync_state.GetShared<EVENT_STATE>(event)),
exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
bool SyncOpResetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
auto *events_context = cb_context.GetCurrentEventsContext();
assert(events_context);
bool skip = false;
if (!events_context) return skip;
const auto &sync_state = cb_context.GetSyncState();
const auto *sync_event = events_context->Get(event_);
if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
const char *const set_wait =
"%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
"hazards.";
const char *message = set_wait; // Only one message this call.
if (!sync_event->HasBarrier(exec_scope_.mask_param, exec_scope_.exec_scope)) {
const char *vuid = nullptr;
switch (sync_event->last_command) {
case CMD_SETEVENT:
// Needs a barrier between set and reset
vuid = "SYNC-vkCmdResetEvent-missingbarrier-set";
break;
case CMD_WAITEVENTS: {
// Needs to be in the barriers chain (either because of a barrier, or because of dstStageMask)
vuid = "SYNC-vkCmdResetEvent-missingbarrier-wait";
break;
}
default:
// The only other valid last_command values require no barrier.
assert((sync_event->last_command == CMD_NONE) || (sync_event->last_command == CMD_RESETEVENT));
break;
}
if (vuid) {
skip |= sync_state.LogError(event_->event, vuid, message, CmdName(),
sync_state.report_data->FormatHandle(event_->event).c_str(), CmdName(),
CommandTypeString(sync_event->last_command));
}
}
return skip;
}
void SyncOpResetEvent::Record(CommandBufferAccessContext *cb_context) const {
auto *events_context = cb_context->GetCurrentEventsContext();
assert(events_context);
if (!events_context) return;
auto *sync_event = events_context->GetFromShared(event_);
if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
// Update the event state
sync_event->last_command = cmd_;
sync_event->unsynchronized_set = CMD_NONE;
sync_event->ResetFirstScope();
sync_event->barriers = 0U;
}
SyncOpSetEvent::SyncOpSetEvent(CMD_TYPE cmd, const SyncValidator &sync_state, VkQueueFlags queue_flags, VkEvent event,
VkPipelineStageFlags stageMask)
: SyncOpBase(cmd),
event_(sync_state.GetShared<EVENT_STATE>(event)),
src_exec_scope_(SyncExecScope::MakeSrc(queue_flags, stageMask)) {}
bool SyncOpSetEvent::Validate(const CommandBufferAccessContext &cb_context) const {
// I'll put this here just in case we need to pass this in for future extension support
bool skip = false;
const auto &sync_state = cb_context.GetSyncState();
auto *events_context = cb_context.GetCurrentEventsContext();
assert(events_context);
if (!events_context) return skip;
const auto *sync_event = events_context->Get(event_);
if (!sync_event) return skip; // Core, Lifetimes, or Param check needs to catch invalid events.
const char *const reset_set =
"%s: %s %s operation following %s without intervening execution barrier, is a race condition and may result in data "
"hazards.";
const char *const wait =
"%s: %s %s operation following %s without intervening vkCmdResetEvent, may result in data hazard and is ignored.";
if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
const char *vuid = nullptr;
const char *message = nullptr;
switch (sync_event->last_command) {
case CMD_RESETEVENT:
// Needs a barrier between reset and set
vuid = "SYNC-vkCmdSetEvent-missingbarrier-reset";
message = reset_set;
break;
case CMD_SETEVENT:
// Needs a barrier between set and set
vuid = "SYNC-vkCmdSetEvent-missingbarrier-set";
message = reset_set;
break;
case CMD_WAITEVENTS:
// Needs a barrier or is in second execution scope
vuid = "SYNC-vkCmdSetEvent-missingbarrier-wait";
message = wait;
break;
default:
// The only other valid last_command value requires no barrier.
assert(sync_event->last_command == CMD_NONE);
break;
}
if (vuid) {
assert(nullptr != message);
skip |= sync_state.LogError(event_->event, vuid, message, CmdName(),
sync_state.report_data->FormatHandle(event_->event).c_str(), CmdName(),
CommandTypeString(sync_event->last_command));
}
}
return skip;
}
void SyncOpSetEvent::Record(CommandBufferAccessContext *cb_context) const {
const auto tag = cb_context->NextCommandTag(cmd_);
auto *events_context = cb_context->GetCurrentEventsContext();
auto *access_context = cb_context->GetCurrentAccessContext();
assert(events_context);
if (!events_context) return;
auto *sync_event = events_context->GetFromShared(event_);
if (!sync_event) return; // Core, Lifetimes, or Param check needs to catch invalid events.
// NOTE: We simply record the sync scope here, as anything else would be implementation defined/undefined.
//       We also issue errors re: missing barriers between event commands; if the user fixes those, the fixes
//       also resolve any issues caused by the naive scope setting here.
// The problem with two SetEvents is that one cannot know which group of operations will be waited for.
// Given:
// Stuff1; SetEvent; Stuff2; SetEvent; WaitEvents;
// WaitEvents cannot know which of Stuff1, Stuff2, or both has completed execution.
if (!sync_event->HasBarrier(src_exec_scope_.mask_param, src_exec_scope_.exec_scope)) {
sync_event->unsynchronized_set = sync_event->last_command;
sync_event->ResetFirstScope();
} else if (sync_event->scope.exec_scope == 0) {
// We only set the scope if there isn't one
sync_event->scope = src_exec_scope_;
auto set_scope = [&sync_event](AccessAddressType address_type, const ResourceAccessRangeMap::value_type &access) {
auto &scope_map = sync_event->first_scope[static_cast<size_t>(address_type)];
if (access.second.InSourceScopeOrChain(sync_event->scope.exec_scope, sync_event->scope.valid_accesses)) {
scope_map.insert(scope_map.end(), std::make_pair(access.first, true));
}
};
access_context->ForAll(set_scope);
sync_event->unsynchronized_set = CMD_NONE;
sync_event->first_scope_tag = tag;
}
sync_event->last_command = CMD_SETEVENT;
sync_event->barriers = 0U;
}
SyncOpBeginRenderPass::SyncOpBeginRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state,
const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo, const char *cmd_name)
: SyncOpBase(cmd, cmd_name) {
if (pRenderPassBegin) {
rp_state_ = sync_state.GetShared<RENDER_PASS_STATE>(pRenderPassBegin->renderPass);
renderpass_begin_info_ = safe_VkRenderPassBeginInfo(pRenderPassBegin);
const auto *fb_state = sync_state.Get<FRAMEBUFFER_STATE>(pRenderPassBegin->framebuffer);
if (fb_state) {
shared_attachments_ = sync_state.GetSharedAttachmentViews(*renderpass_begin_info_.ptr(), *fb_state);
// TODO: Revisit this when all attachment validation is through SyncOps to see if we can discard the plain pointer copy
// Note that this is safe to persist as long as shared_attachments_ is not cleared
attachments_.reserve(shared_attachments_.size());
for (const auto attachment : shared_attachments_) {
attachments_.emplace_back(attachment.get());
}
}
if (pSubpassBeginInfo) {
subpass_begin_info_ = safe_VkSubpassBeginInfo(pSubpassBeginInfo);
}
}
}
bool SyncOpBeginRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
// Check if any of the layout transitions are hazardous... but we don't have the renderpass context to work with, so we
// build a temporary one below.
bool skip = false;
assert(rp_state_.get());
if (nullptr == rp_state_.get()) return skip;
auto &rp_state = *rp_state_.get();
const uint32_t subpass = 0;
// Construct the state we can use to validate against... (since validation is const and RecordCmdBeginRenderPass
// hasn't happened yet)
const std::vector<AccessContext> empty_context_vector;
AccessContext temp_context(subpass, cb_context.GetQueueFlags(), rp_state.subpass_dependencies, empty_context_vector,
cb_context.GetCurrentAccessContext());
// Validate attachment operations
if (attachments_.size() == 0) return skip;
const auto &render_area = renderpass_begin_info_.renderArea;
skip |= temp_context.ValidateLayoutTransitions(cb_context, rp_state, render_area, subpass, attachments_, CmdName());
// Validate load operations if there were no layout transition hazards
if (!skip) {
temp_context.RecordLayoutTransitions(rp_state, subpass, attachments_, kCurrentCommandTag);
skip |= temp_context.ValidateLoadOperation(cb_context, rp_state, render_area, subpass, attachments_, CmdName());
}
return skip;
}
void SyncOpBeginRenderPass::Record(CommandBufferAccessContext *cb_context) const {
// TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
assert(rp_state_.get());
if (nullptr == rp_state_.get()) return;
const auto tag = cb_context->NextCommandTag(cmd_);
cb_context->RecordBeginRenderPass(*rp_state_.get(), renderpass_begin_info_.renderArea, attachments_, tag);
}
SyncOpNextSubpass::SyncOpNextSubpass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo, const char *name_override)
: SyncOpBase(cmd, name_override) {
if (pSubpassBeginInfo) {
subpass_begin_info_.initialize(pSubpassBeginInfo);
}
if (pSubpassEndInfo) {
subpass_end_info_.initialize(pSubpassEndInfo);
}
}
bool SyncOpNextSubpass::Validate(const CommandBufferAccessContext &cb_context) const {
bool skip = false;
const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
if (!renderpass_context) return skip;
skip |= renderpass_context->ValidateNextSubpass(cb_context.GetExecutionContext(), CmdName());
return skip;
}
void SyncOpNextSubpass::Record(CommandBufferAccessContext *cb_context) const {
// TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
cb_context->RecordNextSubpass(cmd_);
}
SyncOpEndRenderPass::SyncOpEndRenderPass(CMD_TYPE cmd, const SyncValidator &sync_state, const VkSubpassEndInfo *pSubpassEndInfo,
const char *name_override)
: SyncOpBase(cmd, name_override) {
if (pSubpassEndInfo) {
subpass_end_info_.initialize(pSubpassEndInfo);
}
}
bool SyncOpEndRenderPass::Validate(const CommandBufferAccessContext &cb_context) const {
bool skip = false;
const auto *renderpass_context = cb_context.GetCurrentRenderPassContext();
if (!renderpass_context) return skip;
skip |= renderpass_context->ValidateEndRenderPass(cb_context.GetExecutionContext(), CmdName());
return skip;
}
void SyncOpEndRenderPass::Record(CommandBufferAccessContext *cb_context) const {
// TODO PHASE2 need to have a consistent way to record to either command buffer or queue contexts
cb_context->RecordEndRenderPass(cmd_);
}
| 1 | 15,125 | @sfricke-samsung -- thanks. saves a few atomic ops which is always good :) The TODO is there to track that this object is storing *both* vectors of shared_ptr and plain pointers to the same data. The shared are to ensure scope, and the plain are for backwards compatibility with existing code that consumed a plain pointer vector (and some code that still does). That should be cleaned up. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -16,10 +16,16 @@
//
using System;
+using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection;
using FluentAssertions;
+using Nethermind.Core;
+using Nethermind.Logging;
+using Nethermind.Monitoring.Config;
+using Nethermind.Monitoring.Metrics;
+using Nethermind.Runner;
using NUnit.Framework;
namespace Nethermind.Monitoring.Test | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
//
using System;
using System.IO;
using System.Linq;
using System.Reflection;
using FluentAssertions;
using NUnit.Framework;
namespace Nethermind.Monitoring.Test
{
[TestFixture]
public class MetricsTests
{
[Test]
public void All_config_items_have_descriptions()
{
ValidateMetricsDescriptions();
}
public static void ValidateMetricsDescriptions()
{
ForEachProperty(CheckDescribedOrHidden);
}
private static void CheckDescribedOrHidden(PropertyInfo property)
{
System.ComponentModel.DescriptionAttribute attribute = property.GetCustomAttribute<System.ComponentModel.DescriptionAttribute>();
attribute.Should().NotBeNull();
}
private static void ForEachProperty(Action<PropertyInfo> verifier)
{
string[] dlls = Directory.GetFiles(AppDomain.CurrentDomain.BaseDirectory, "Nethermind.*.dll");
foreach (string dll in dlls)
{
TestContext.WriteLine($"Verify {nameof(MetricsTests)} on {Path.GetFileName(dll)}");
Assembly assembly = Assembly.LoadFile(dll);
Type[] configs = assembly.GetExportedTypes().Where(t => t.Name == "Metrics").ToArray();
foreach (Type metricsType in configs)
{
TestContext.WriteLine($" Verifying type {metricsType.FullName}");
PropertyInfo[] properties = metricsType.GetProperties(BindingFlags.Static | BindingFlags.Public);
foreach (PropertyInfo property in properties)
{
try
{
TestContext.WriteLine($" Verifying property {property.Name}");
verifier(property);
}
catch (Exception e)
{
throw new Exception(property.Name, e);
}
}
}
}
}
}
}
| 1 | 25,780 | minor: typo (knowMetricsTypes -> knownMetricsTypes) | NethermindEth-nethermind | .cs |
@@ -503,7 +503,13 @@ ConstValue* OptRangeSpec::getConstOperand(ItemExpr* predExpr, Lng32 constInx)
// currently support. Predicates involving types not yet supported will be
// treated as residual predicates.
if (QRDescGenerator::typeSupported(static_cast<ConstValue*>(right)->getType()))
+ {
+        /* add constvalue 'not casespecific' to type_ */
+ ((CharType*)getType())->setCaseinsensitive(
+ ((CharType *)(((ConstValue*)(right))->getType()))->isCaseinsensitive());
+
return static_cast<ConstValue*>(right);
+ }
else
return NULL;
} // getConstOperand() | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
#include <limits>
#include <float.h>
#include "nawstring.h"
#include "QRDescGenerator.h"
#include "NumericType.h"
#include "DatetimeType.h"
#include "QRLogger.h"
#include "OptRange.h"
#include "ItemLog.h"
#include "ComCextdecs.h"
double getDoubleValue(ConstValue* val, logLevel level);
/**
* Returns the Int64 value corresponding to the type of the ConstValue val.
* val can be of any numeric, datetime, or interval type. The returned value
* is used in the representation of a range of values implied by the predicates
* of a query for an exact numeric, datetime, or interval type.
*
* @param val ConstValue that wraps the value to be represented as an Int64.
* @param rangeColType The type of the column or expr the range is for.
* @param [out] truncated TRUE returned if the value returned was the result of
* truncating the input value. This can happen for floating
* point input, or exact numeric input that has greater
* scale than rangeColType.
* @param [out] valWasNegative TRUE returned if the input value was negative.
* Adjustment of truncated values is only done for
* positive values (because the truncation of a negative
* value adjusts it correctly). The caller can't just
* look at the returned value, because if it is 0, it
* may have been truncated from a small negative (-1 < n < 0)
* or a small positive (0 < n < 1) value.
* @param level Logging level to use in event of failure.
* @return The rangespec internal representation of the input constant value.
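*
* Illustrative example (based on the truncation handling described above): with
* rangeColType NUMERIC(4,2), the constant 12.341 is returned as the scaled
* integer 1234, with truncated = TRUE and valWasNegative = FALSE.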
*/
static Int64 getInt64Value(ConstValue* val, const NAType* rangeColType,
NABoolean& truncated, NABoolean& valWasNegative,
logLevel level);
OptRangeSpec::OptRangeSpec(QRDescGenerator* descGenerator, CollHeap* heap, logLevel ll)
: RangeSpec(heap, ll),
descGenerator_(descGenerator),
rangeExpr_(NULL),
vid_(NULL_VALUE_ID),
isIntersection_(FALSE)
{
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
descGenerator, QRLogicException,
"OptRangeSpec constructed for null QRDescGenerator");
if (descGenerator->isDumpMvMode())
setDumpMvMode();
}
ValueId OptRangeSpec::getBaseCol(const ValueIdSet& vegMembers)
{
for (ValueId id=vegMembers.init(); vegMembers.next(id); vegMembers.advance(id))
{
if (id.getItemExpr()->getOperatorType() == ITM_BASECOLUMN)
return id;
}
return NULL_VALUE_ID;
}
ValueId OptRangeSpec::getBaseCol(const ValueId vegRefVid)
{
ItemExpr* itemExpr = vegRefVid.getItemExpr();
OperatorTypeEnum opType = itemExpr->getOperatorType();
// See if the vid is for a basecol instead of a vegref.
if (opType == ITM_BASECOLUMN)
return vegRefVid;
// Get the value id of the first member that is a base column.
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_,
opType == ITM_VEG_REFERENCE, QRDescriptorException,
"OptRangeSpec::getBaseCol() expected value id of a "
"vegref, not of op type -- %d", opType);
ValueId baseColVid = getBaseCol(static_cast<VEGReference*>(itemExpr)
->getVEG()->getAllValues());
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
baseColVid != NULL_VALUE_ID, QRDescriptorException,
"Vegref contains no base columns");
return baseColVid;
}
OptRangeSpec* OptRangeSpec::createRangeSpec(QRDescGenerator* descGenerator,
ItemExpr* predExpr,
CollHeap* heap,
NABoolean createForNormalizer)
{
QRTRACER("createRangeSpec");
OptRangeSpec* range = NULL;
if (predExpr->getOperatorType() == ITM_RANGE_SPEC_FUNC)
{
assertLogAndThrow(CAT_SQL_COMP_RANGE, LL_ERROR,
!createForNormalizer, QRDescriptorException,
"RangeSpecRef should not be present if creating for Normalizer");
RangeSpecRef* rangeIE = static_cast<RangeSpecRef*>(predExpr);
range = new(heap) OptRangeSpec(*rangeIE->getRangeObject(), heap);
// RangeSpecRefs are produced by the Normalizer. The rangespec they contain
// may have a vegref vid as the rangecolvalueid instead of using the first
// basecol vid as we do when a range is created in mvqr. Also, the vid may
// be that of a joinpred, but will still be stored as the rangecolvalueid.
// Below, we sort out these issues for the new rangespec we have created
// using the copy ctor on the one in the RangeSpecRef.
ValueId rcvid = range->getRangeColValueId();
if (rcvid != NULL_VALUE_ID)
{
ItemExpr* ie = rcvid.getItemExpr();
if (ie->getOperatorType() == ITM_VEG_REFERENCE)
{
if (descGenerator->isJoinPredId(rcvid))
{
range->setRangeColValueId(NULL_VALUE_ID);
range->setRangeJoinPredId(rcvid);
}
else
{
rcvid = range->getBaseCol(((VEGReference*)ie)->getVEG()->getAllValues());
if (rcvid != NULL_VALUE_ID)
range->setRangeColValueId(rcvid);
}
}
}
}
else
{
if (createForNormalizer)
{
range = new (heap) OptNormRangeSpec(descGenerator, heap);
(static_cast<OptNormRangeSpec*>(range))
->setOriginalItemExpr(predExpr);
}
else
range = new (heap) OptRangeSpec(descGenerator, heap);
if (!range->buildRange(predExpr))
{
delete range;
return NULL;
}
}
range->setID(predExpr->getValueId());
range->log();
return range;
}
// Protected copy ctor, used by clone().
OptRangeSpec::OptRangeSpec(const OptRangeSpec& other, CollHeap* heap)
: RangeSpec(other, heap),
descGenerator_(other.descGenerator_),
rangeExpr_(NULL),
vid_(other.vid_),
isIntersection_(other.isIntersection_)
{
// At this point the inherited heap ptr mvqrHeap_ has been initialized
// by the superclass ctor.
if (other.rangeExpr_)
rangeExpr_ = other.rangeExpr_->copyTree(mvqrHeap_);
}
QRRangePredPtr OptRangeSpec::createRangeElem()
{
QRTRACER("createRangeElem");
QRRangePredPtr rangePredElem =
new (mvqrHeap_) QRRangePred(ADD_MEMCHECK_ARGS(mvqrHeap_));
rangePredElem->setRangeItem(genRangeItem());
NABoolean rangeIsOnCol = (rangeJoinPredId_ != NULL_VALUE_ID ||
rangeColValueId_ != NULL_VALUE_ID);
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
getID()>0, QRDescriptorException,
"No id for range element in OptRangeSpec::createRangeElem().");
// The id of this rangespec is the value id of the original predicate on the
// corresponding range column/expr. If other preds on the range col/expr were
// found and had to be intersected, we need to use the value id of the itemexpr
// that is the result of the intersection (so the right predicate will be used
// for the rewrite).
if (isIntersection_)
rangePredElem->setID(getRangeItemExpr()->getValueId());
else
rangePredElem->setID(getID());
const NAType* typePtr = getType();
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
typePtr, QRDescriptorException,
"Call to getType() returned NULL in OptRangeSpec::createRangeElem().");
const NAType& type = *typePtr;
rangePredElem->setSqlType(type.getTypeSQLname());
QROpInequalityPtr ineqOp;
QROpEQPtr eqOp = NULL;
QROpBTPtr betweenOp;
CollIndex numSubranges = subranges_.entries();
for (CollIndex i=0; i<numSubranges; i++)
{
SubrangeBase& subrange = *subranges_[i];
if (subrange.startIsMin() ||
(i==0 && rangeIsOnCol && subrange.isMinForType(type)))
{
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_,
i==0, QRDescriptorException,
"Subrange other than 1st is unbounded on low side, "
"subrange index %d", i);
if (subrange.endIsMax() ||
(i==numSubranges-1 && rangeIsOnCol && subrange.isMaxForType(type)))
{
// Range spans all values of the type. If NULL is included as
// well, return NULL to indicate no range restriction. If NULL is
// not included in the range, an empty <RangePred> is used to
// indicate IS NOT NULL.
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
numSubranges==1, QRDescriptorException,
"Range of all values must have a single Subrange.");
if (nullIncluded_)
{
QRLogger::log(CAT_SQL_COMP_RANGE, LL_INFO,
"Range predicate ignored because it spans entire range + NULL.");
deletePtr(rangePredElem);
return NULL;
}
else
{
// An IS NOT NULL predicate on a NOT NULL column is removed in
// the early compilation stages. If we generate the usual empty
// range element to represent a predicate that spans all values,
// it will not match this missing predicate. So we detect and
// remove it.
QRElementPtr rangeItemElem = rangePredElem->getRangeItem()
->getReferencedElement();
if (rangeItemElem->getIDFirstChar() == 'C')
{
ValueId vid = rangeItemElem->getIDNum();
if ((static_cast<BaseColumn*>(vid.getItemExpr()))
->getNAColumn()->getNotNullNondroppable())
{
QRLogger::log(CAT_SQL_COMP_RANGE, LL_INFO,
"Range predicate ignored because it spans entire "
"range and has NOT NULL constraint.");
deletePtr(rangePredElem);
return NULL;
}
}
}
return rangePredElem; // leave it empty
}
if (subrange.endInclusive())
ineqOp = new (mvqrHeap_) QROpLE(ADD_MEMCHECK_ARGS(mvqrHeap_));
else
ineqOp = new (mvqrHeap_) QROpLS(ADD_MEMCHECK_ARGS(mvqrHeap_));
ineqOp->setValue(subrange.getEndScalarValElem(mvqrHeap_, type));
// If start is not min, we are here because lower bound for the type
// was start of first subrange.
ineqOp->setNormalized(!subrange.startIsMin());
rangePredElem->addOperator(ineqOp);
}
else if (subrange.endIsMax() ||
(i==numSubranges-1 && rangeIsOnCol
&& subrange.isMaxForType(type)))
{
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_,
i==numSubranges-1, QRDescriptorException,
"Subrange other than last is unbounded on high side, "
"subrange index %d",
i);
if (eqOp) // wrap this up if one in progress
{
rangePredElem->addOperator(eqOp);
eqOp = NULL;
}
if (subrange.startInclusive())
ineqOp = new (mvqrHeap_) QROpGE(ADD_MEMCHECK_ARGS(mvqrHeap_));
else
ineqOp = new (mvqrHeap_) QROpGT(ADD_MEMCHECK_ARGS(mvqrHeap_));
ineqOp->setValue(subrange.getStartScalarValElem(mvqrHeap_, type));
// If end is not max, we are here because upper bound for the type
// was end of last subrange.
ineqOp->setNormalized(!subrange.endIsMax());
rangePredElem->addOperator(ineqOp);
}
else if (subrange.isSingleValue())
{
// Add the value to a new OpEQ or the one we are already working on,
// but don't finish it until we hit something besides a single-value
// subrange.
if (!eqOp)
eqOp = new (mvqrHeap_) QROpEQ(ADD_MEMCHECK_ARGS(mvqrHeap_));
eqOp->addValue(subrange.getStartScalarValElem(mvqrHeap_, type));
}
else
{
// If values have been accumulated in an OpEQ, add it before doing
// the between op.
if (eqOp)
{
rangePredElem->addOperator(eqOp);
eqOp = NULL;
}
betweenOp = new (mvqrHeap_) QROpBT(ADD_MEMCHECK_ARGS(mvqrHeap_));
betweenOp->setStartValue(subrange.getStartScalarValElem(mvqrHeap_,
type));
betweenOp->setStartIncluded(subrange.startInclusive());
betweenOp->setEndValue(subrange.getEndScalarValElem(mvqrHeap_,
type));
betweenOp->setEndIncluded(subrange.endInclusive());
rangePredElem->addOperator(betweenOp);
}
}
// If IS NULL is part of the range spec, it should come last. If an OpEQ
// element is in progress, add it there, else create one for it. In either
// of these cases, add it to the range pred element.
if (eqOp)
{
if (nullIncluded_)
eqOp->setNullVal(new(mvqrHeap_) QRNullVal(ADD_MEMCHECK_ARGS(mvqrHeap_)));
rangePredElem->addOperator(eqOp);
}
else if (nullIncluded_)
{
eqOp = new (mvqrHeap_) QROpEQ(ADD_MEMCHECK_ARGS(mvqrHeap_));
eqOp->setNullVal(new(mvqrHeap_) QRNullVal(ADD_MEMCHECK_ARGS(mvqrHeap_)));
rangePredElem->addOperator(eqOp);
}
return rangePredElem;
} // createRangeElem()
QRElementPtr OptRangeSpec::genRangeItem()
{
QRElementPtr elem;
if (rangeColValueId_ != NULL_VALUE_ID)
elem = descGenerator_->genQRColumn(rangeColValueId_,
rangeJoinPredId_);
else if (rangeExpr_)
elem = descGenerator_->genQRExpr(rangeExpr_, rangeJoinPredId_);
else
{
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
rangeJoinPredId_ != NULL_VALUE_ID, QRDescriptorException,
"All range value ids are null");
ValueId colVid = getBaseCol(rangeJoinPredId_);
elem = descGenerator_->genQRColumn(colVid,
rangeJoinPredId_);
}
return elem;
}
ItemExpr* OptRangeSpec::getRangeExpr() const
{
if (rangeExpr_)
return rangeExpr_;
else if (rangeJoinPredId_ != NULL_VALUE_ID)
return ((ValueId)rangeJoinPredId_).getItemExpr();
else
return ((ValueId)rangeColValueId_).getItemExpr();
}
// Exprs have been converted to a canonical form in which const is the 2nd operand.
// NULL is returned if there is not a constant operand, or if the other operand
// is not the one the range is being built for.
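// Illustrative example (assumed canonical shape): for the predicate T.a < 10,
// child(0) is the column (or its VEGReference) and child(constInx) is the
// ConstValue 10; a predicate such as T.a < T.b has no constant operand, so
// NULL is returned and it is handled as a residual predicate.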
ConstValue* OptRangeSpec::getConstOperand(ItemExpr* predExpr, Lng32 constInx)
{
QRTRACER("getConstOperand");
ItemExpr* left = predExpr->child(0);
ItemExpr* right = predExpr->child(constInx);
// Bail out if we don't have a constant. If a vegref, see if the veg includes
// a constant, and substitute that if so.
if (right->getOperatorType() != ITM_CONSTANT)
{
if (right->getOperatorType() == ITM_VEG_REFERENCE)
{
ValueId constVid = (static_cast<VEGReference*>(right))->getVEG()->getAConstant(TRUE);
if (constVid == NULL_VALUE_ID)
return NULL;
else
right = constVid.getItemExpr();
}
else
return NULL;
}
ValueId colVid;
switch (left->getOperatorType())
{
case ITM_VEG_REFERENCE:
if (forNormalizer())
colVid = left->getValueId();
else
colVid = getBaseCol(((VEGReference*)left)->getVEG()->getAllValues());
break;
case ITM_BASECOLUMN: // Should only happen for a check constraint
colVid = left->getValueId();
break;
default: // must be an expression
colVid = NULL_VALUE_ID;
break;
}
if (colVid != NULL_VALUE_ID)
{
// If this range is for an expression, the pred is not on it.
if (rangeExpr_)
return NULL;
if (rangeColValueId_ == NULL_VALUE_ID)
{
rangeColValueId_ = colVid;
EqualitySet* eqSet =
descGenerator_->getEqualitySet(&rangeColValueId_);
if (eqSet)
{
rangeJoinPredId_ = (QRValueId)eqSet->getJoinPredId();
setType(eqSet->getType());
}
else
{
rangeJoinPredId_ = (QRValueId)NULL_VALUE_ID;
setType(&((ValueId)rangeColValueId_).getType());
}
}
else if (rangeColValueId_ != colVid)
return NULL;
}
else
{
// The left side of the pred is an expression. If this range is for a
// simple column, it doesn't match.
if (rangeColValueId_ != NULL_VALUE_ID)
return NULL;
if (!rangeExpr_)
{
// If more than one node is involved in an expression, it is a
// residual pred instead of a range pred.
if (descGenerator_->getExprNode(left) == NULL_CA_ID)
return NULL;
setRangeExpr(left); // sets rangeExpr_, rangeExprText_
EqualitySet* eqSet =
descGenerator_->getEqualitySet(&rangeExprText_);
if (eqSet)
{
rangeJoinPredId_ = (QRValueId)eqSet->getJoinPredId();
setType(eqSet->getType());
}
else
{
rangeJoinPredId_ = (QRValueId)NULL_VALUE_ID;
setType(&rangeExpr_->getValueId().getType());
}
}
else
{
// The ItemExpr ptrs will be different, so we compare the expression
// text to see if they are the same. At some point this text will be
// in a canonical form that ignores syntactic variations.
NAString exprText;
left->unparse(exprText, OPTIMIZER_PHASE, MVINFO_FORMAT);
if (rangeExprText_ != exprText)
return NULL;
}
}
if (!(QRDescGenerator::typeSupported(getType())))
return NULL;
// If we reach this point, the predicate has been confirmed to apply to the
// same col/expr of this range, and the right operand has been confirmed to
// be a constant. Before returning the ConstValue, make sure it is a type we
// currently support. Predicates involving types not yet supported will be
// treated as residual predicates.
if (QRDescGenerator::typeSupported(static_cast<ConstValue*>(right)->getType()))
return static_cast<ConstValue*>(right);
else
return NULL;
} // getConstOperand()
void OptRangeSpec::addSubrange(ConstValue* start, ConstValue* end,
NABoolean startInclusive, NABoolean endInclusive)
{
QRTRACER("addSubrange");
const NAType* type = getType();
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
type, QRDescriptorException,
"Call to getType() returned NULL in OptRangeSpec::addSubrange().");
NAString unparsedStart(""), unparsedEnd("");
if (isDumpMvMode())
{
// Add the "official" unparsed text of the expression as a sub-element.
if (start)
start->unparse(unparsedStart, OPTIMIZER_PHASE, QUERY_FORMAT);
if (end)
end->unparse(unparsedEnd, OPTIMIZER_PHASE, QUERY_FORMAT);
}
NABuiltInTypeEnum typeQual = type->getTypeQualifier();
switch (typeQual)
{
case NA_NUMERIC_TYPE:
case NA_DATETIME_TYPE:
case NA_INTERVAL_TYPE:
case NA_BOOLEAN_TYPE:
//if (((const NumericType*)type)->isExact())
if (typeQual == NA_DATETIME_TYPE ||
typeQual == NA_INTERVAL_TYPE ||
(typeQual == NA_NUMERIC_TYPE &&
static_cast<const NumericType*>(type)->isExact()) ||
(typeQual == NA_BOOLEAN_TYPE))
{
// Fixed-point numeric subranges are normalized to be inclusive, to
// simplify equivalence and subsumption checks.
Subrange<Int64>* sub = new (mvqrHeap_) Subrange<Int64>(logLevel_);
sub->setUnparsedStart(unparsedStart);
sub->setUnparsedEnd(unparsedEnd);
NABoolean valTruncated;
NABoolean valNegative;
NABoolean startOverflowed = FALSE;
NABoolean endOverflowed = FALSE;
if (start)
{
// If the constant is truncated because it has higher scale than
// the type of the range col/expr, the truncated value is not the
// start of the range. 1 is added to the truncated value to get
// the next value that is possible for the type.
sub->start = getInt64Value(start, type, valTruncated, valNegative, logLevel_);
if ((!startInclusive || valTruncated) &&
(!valTruncated || !valNegative))
sub->makeStartInclusive(type, startOverflowed);
}
else
sub->setStartIsMin(TRUE);
if (end)
{
// If the constant is truncated because it has higher scale than
// the type of the range col/expr, the truncated value must be
// included in the range even if the end is not inclusive (<).
sub->end = getInt64Value(end, type, valTruncated, valNegative, logLevel_);
if ((!endInclusive && !valTruncated) ||
(valTruncated && valNegative))
sub->makeEndInclusive(type, endOverflowed);
}
else
sub->setEndIsMax(TRUE);
// If not originally inclusive, has been adjusted above.
// Need this in case makeXXXInclusive was not called, but leave as
// is if made noninclusive because of positive (for start) or
// negative (for end) overflow.
if (!startOverflowed)
sub->setStartInclusive(TRUE);
if (!endOverflowed)
sub->setEndInclusive(TRUE);
// Handling a constant with scale that exceeds that of the range
// column could result in an empty (i.e., start>end) range. For example,
// if n is a numeric(4,2), the predicate n = 12.341 will result in
// the range 12.35..12.34, which is appropriate since the predicate
// is guaranteed to be false by the type constraint of the column.
if (sub->startIsMin() || sub->endIsMax() || sub->start <= sub->end)
placeSubrange(sub);
else
delete sub;
}
else
{
Subrange<double>* sub = new (mvqrHeap_) Subrange<double>(logLevel_);
sub->setUnparsedStart(unparsedStart);
sub->setUnparsedEnd(unparsedEnd);
if (start)
sub->start = getDoubleValue(start, logLevel_);
else
sub->setStartIsMin(TRUE);
if (end)
sub->end = getDoubleValue(end, logLevel_);
else
sub->setEndIsMax(TRUE);
sub->setStartInclusive(startInclusive);
sub->setEndInclusive(endInclusive);
placeSubrange(sub);
}
break;
// In some cases, constant folding of char expressions produces a varchar
// constant, so we have to take a possible length field into account.
case NA_CHARACTER_TYPE:
{
Lng32 headerBytes;
const NAType* startType = (start ? start->getType() : NULL);
const NAType* endType = (end ? end->getType() : NULL);
// Alignment is 2 for UCS2, 1 for single-byte char set.
if (type->getDataAlignment() == 2)
{
// Unicode string.
Subrange<RangeWString>* sub = new (mvqrHeap_) Subrange<RangeWString>(logLevel_);
sub->setUnparsedStart(unparsedStart);
sub->setUnparsedEnd(unparsedEnd);
if (start)
{
headerBytes = startType->getVarLenHdrSize() +
startType->getSQLnullHdrSize();
sub->start.remove(0)
.append((const NAWchar*)start->getConstValue()
+ (headerBytes / sizeof(NAWchar)),
(start->getStorageSize() - headerBytes)
/ sizeof(NAWchar));
}
else
sub->setStartIsMin(TRUE);
if (end)
{
headerBytes = endType->getVarLenHdrSize() +
endType->getSQLnullHdrSize();
sub->end.remove(0)
.append((const NAWchar*)end->getConstValue()
+ (headerBytes / sizeof(NAWchar)),
(end->getStorageSize() - headerBytes)
/ sizeof(NAWchar));
}
else
sub->setEndIsMax(TRUE);
sub->setStartInclusive(startInclusive);
sub->setEndInclusive(endInclusive);
placeSubrange(sub);
}
else
{
// Latin1 string.
Subrange<RangeString>* sub = new (mvqrHeap_) Subrange<RangeString>(logLevel_);
sub->setUnparsedStart(unparsedStart);
sub->setUnparsedEnd(unparsedEnd);
if (start)
{
headerBytes = startType->getVarLenHdrSize() +
startType->getSQLnullHdrSize();
sub->start.remove(0)
.append((const char*)start->getConstValue() + headerBytes,
start->getStorageSize() - headerBytes);
}
else
sub->setStartIsMin(TRUE);
if (end)
{
headerBytes = endType->getVarLenHdrSize() +
endType->getSQLnullHdrSize();
sub->end.remove(0)
.append((const char*)end->getConstValue() + headerBytes,
end->getStorageSize() - headerBytes);
}
else
sub->setEndIsMax(TRUE);
sub->setStartInclusive(startInclusive);
sub->setEndInclusive(endInclusive);
placeSubrange(sub);
}
}
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_,
FALSE, QRDescriptorException,
"Unhandled data type: %d", typeQual);
break;
}
} // addSubrange(ConstValue*...
ItemExpr* OptRangeSpec::getCheckConstraintPred(ItemExpr* checkConstraint)
{
if (checkConstraint->getOperatorType() != ITM_CASE)
{
QRLogger::log(CAT_SQL_COMP_RANGE, logLevel_,
"Expected ITM_CASE but found operator %d.",
checkConstraint->getOperatorType());
return NULL;
}
ItemExpr* itemExpr = checkConstraint->child(0);
if (itemExpr->getOperatorType() != ITM_IF_THEN_ELSE)
{
QRLogger::log(CAT_SQL_COMP_RANGE, logLevel_,
"Expected ITM_IF_THEN_ELSE but found operator %d.",
itemExpr->getOperatorType());
return NULL;
}
// Child of the if-then-else is either is_false, which is the parent of the
// predicate (for most check constraints), or the predicate itself (for
// isnotnull or the check option of a view).
itemExpr = itemExpr->child(0);
if (itemExpr->getOperatorType() == ITM_IS_FALSE)
return itemExpr->child(0);
else
return itemExpr;
}
void OptRangeSpec::intersectCheckConstraints(QRDescGenerator* descGen,
ValueId colValId)
{
QRTRACER("intersectCheckConstraints");
// Check and Not Null constraints.
//
ItemExpr* itemExpr = colValId.getItemExpr();
if (itemExpr->getOperatorType() == ITM_VEG_REFERENCE)
{
// For a vegref, intersect all constraints applied to any member.
const ValueIdSet& vidSet = static_cast<VEGReference*>(itemExpr)
->getVEG()->getAllValues();
for (ValueId vid=vidSet.init(); vidSet.next(vid); vidSet.advance(vid))
{
if (vid.getItemExpr()->getOperatorType() == ITM_BASECOLUMN)
intersectCheckConstraints(descGen, vid);
}
return;
}
else if (itemExpr->getOperatorType() != ITM_BASECOLUMN)
{
QRLogger::log(CAT_SQL_COMP_RANGE, logLevel_,
"Nonfatal unexpected result: range column operator type is "
"%d instead of ITM_BASECOLUMN.", itemExpr->getOperatorType());
return;
}
#ifdef _DEBUG
const NATable* tbl = colValId.getNAColumn()->getNATable();
const CheckConstraintList& checks = tbl->getCheckConstraints();
for (CollIndex c=0; c<checks.entries(); c++)
{
QRLogger::log(CAT_SQL_COMP_RANGE, LL_DEBUG,
"Check constraint on table %s: %s",
tbl->getTableName().getObjectName().data(),
checks[c]->getConstraintText().data());
}
#endif
OptRangeSpec* checkRange = NULL;
ItemExpr* checkPred;
const ValueIdList& checkConstraints =
(static_cast<BaseColumn*>(itemExpr))->getTableDesc()->getCheckConstraints();
for (CollIndex i=0; i<checkConstraints.entries(); i++)
{
checkPred = getCheckConstraintPred(checkConstraints[i].getItemExpr());
if (checkPred)
{
checkRange = new(mvqrHeap_) OptRangeSpec(descGen, mvqrHeap_);
checkRange->setRangeColValueId(colValId);
checkRange->setType(colValId.getType().newCopy(mvqrHeap_));
if (checkRange->buildRange(checkPred))
// Call the RangeSpec version of intersect; this avoids trying to
// modify the original ItemExpr with the check constraint pred.
RangeSpec::intersectRange(checkRange);
delete checkRange;
}
}
}
// Add type-implied constraint for numeric, datetime, and interval types.
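// (Illustrative: for a SMALLINT column the implied subrange is [-32768, 32767],
// and NULL is always included in a type range; a signed 64-bit integer column
// needs no restriction at all.)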
void OptRangeSpec::intersectTypeConstraint(QRDescGenerator* descGen,
ValueId colValId)
{
QRTRACER("intersectTypeConstraint");
NABoolean isExact;
const NAType& colType = colValId.getType();
NABuiltInTypeEnum typeQual = colType.getTypeQualifier();
switch (typeQual)
{
case NA_NUMERIC_TYPE:
{
const NumericType& numType = static_cast<const NumericType&>(colType);
isExact = numType.isExact();
if (isExact && numType.getFSDatatype() == REC_BIN64_SIGNED)
return; // No type restriction for 64-bit integers
}
break;
case NA_DATETIME_TYPE:
case NA_INTERVAL_TYPE:
isExact = TRUE;
break;
default:
return; // No type constraint applied for other types
}
OptRangeSpec* typeRange = NULL;
if (isExact) // Exact numeric, datetime, interval
{
// Add the subrange implied by the type. If the type is largeint,
// nothing is needed here and no type range will be created. We don't
// need to set the type of the range specs created in this function,
// because addSubrange (which looks at the type) is bypassed and we call
// placeSubrange directly.
typeRange = new(mvqrHeap_) OptRangeSpec(descGen, mvqrHeap_);
Int64 start, end;
SubrangeBase::getExactNumericMinMax(colType, start, end, logLevel_);
Subrange<Int64>* numSubrange = new(mvqrHeap_) Subrange<Int64>(logLevel_);
numSubrange->setUnparsedStart("");
numSubrange->setUnparsedEnd("");
numSubrange->start = start;
numSubrange->end = end;
numSubrange->setStartIsMin(FALSE);
numSubrange->setEndIsMax(FALSE);
numSubrange->setStartInclusive(TRUE);
numSubrange->setEndInclusive(TRUE);
typeRange->placeSubrange(numSubrange);
}
else // approximate numeric
{
switch (colType.getFSDatatype())
{
case REC_IEEE_FLOAT32:
{
typeRange = new(mvqrHeap_) OptRangeSpec(descGen, mvqrHeap_);
Subrange<double>* dblSubrange = new(mvqrHeap_) Subrange<double>(logLevel_);
dblSubrange->end = static_cast<const NumericType&>(colType).getMaxValue();
dblSubrange->setUnparsedStart("");
dblSubrange->setUnparsedEnd("");
dblSubrange->start = -(dblSubrange->end);
dblSubrange->setStartIsMin(FALSE);
dblSubrange->setEndIsMax(FALSE);
dblSubrange->setStartInclusive(TRUE);
dblSubrange->setEndInclusive(TRUE);
typeRange->placeSubrange(dblSubrange);
}
break;
case REC_IEEE_FLOAT64:
// No range restriction needed.
break;
default:
QRLogger::log(CAT_SQL_COMP_RANGE, logLevel_,
"No case in intersectTypeConstraint() for "
"approximate numeric of type %d",
colType.getFSDatatype());
break;
}
}
if (typeRange)
{
typeRange->setNullIncluded(TRUE); // Null always part of a type range
// Call the RangeSpec version of intersect; this avoids trying to
// modify the original ItemExpr with the type constraint pred.
RangeSpec::intersectRange(typeRange);
delete typeRange;
}
} // intersectTypeConstraint()
void OptRangeSpec::addConstraints(QRDescGenerator* descGen)
{
QRTRACER("addConstraints");
ValueId colValId = getRangeColValueId();
if (colValId == NULL_VALUE_ID)
colValId = getRangeJoinPredId();
if (colValId == NULL_VALUE_ID)
return;
// Add constraint implied by the column's type.
intersectTypeConstraint(descGen, colValId);
// Check constraints can be added and dropped at will, and so can be in
// different states when the MV is created and when the query is matched.
// Therefore we utilize check constraints only for query descriptors. This
// may result in a NotProvided instead of a Provided match, but avoids the
// need to invalidate all the MV descriptors using a table when one of the
// table's check constraints is added/dropped.
if (descGen->getDescriptorType() == ET_QueryDescriptor)
intersectCheckConstraints(descGen, colValId);
// else
// assertLogAndThrow1(descGen->getDescriptorType() == ET_MVDescriptor,
// QRDescriptorException,
// "Invalid descriptor type -- %d",
// descGen->getDescriptorType());
}
void OptRangeSpec::addColumnsUsed(const QRDescGenerator* descGen)
{
if (descGen != descGenerator_)
descGenerator_->mergeDescGenerator(descGen);
}
#define AVR_STATE0 0
#define AVR_STATE1 1
#define AVR_STATE2 2
NABoolean OptRangeSpec::buildRange(ItemExpr* origPredExpr)
{
QRTRACER("buildRange");
ConstValue *startValue, *endValue;
OperatorTypeEnum leftOp, rightOp;
NABoolean isRange = TRUE;
NABoolean reprocessAND = FALSE;
//
// buildRange() can be called recursively for all the items in an IN-list
// at a point in time when we are already many levels deep in other
// recursion (e.g. Scan::applyAssociativityAndCommutativity() ). Consequently,
// we may not have much of our stack space available at the time, so
// we must eliminate the recursive calls to buildRange() by keeping the
// information needed by each "recursive" level in the heap and using
// a "while" loop to look at each node in the tree in the same order as
// the old recursive technique would have done.
// The information needed by each "recursive" level is basically just
// a pointer to what node (ItemExpr *) to look at next and a "state" value
// that tells us where we are in the buildRange() code for the ItemExpr
// node that we are currently working on.
//
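// Illustrative walk (assumed): for the predicate (a < 5) OR (a > 10), the OR
// node is visited at AVR_STATE0 and child 0 (a < 5) is pushed; when control
// returns at AVR_STATE1, child 1 (a > 10) is pushed; at AVR_STATE2 the OR is
// done and is popped -- the same visit order the old recursion produced.
//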
ARRAY( ItemExpr * ) IEarray(mvqrHeap_, 10) ; //Initially 10 elements (no particular reason to choose 10)
ARRAY( Int16 ) state(mvqrHeap_, 10) ; //These ARRAYs will grow automatically as needed.)
Int32 currIdx = 0 ;
IEarray.insertAt( currIdx, origPredExpr ) ; //Initialize 1st element in the ARRAYs
state.insertAt( currIdx, AVR_STATE0 ) ;
while( currIdx >= 0 && isRange )
{
ItemExpr * predExpr = IEarray[currIdx] ; //Get ptr to the current IE
OperatorTypeEnum op = predExpr->getOperatorType();
switch (op)
{
case ITM_AND:
// Check for a bounded subrange, from BETWEEN predicate, etc. If not of
// this form, the predicate was presumably too complex to convert to
// conjunctive normal form without a combinatorial explosion of clauses,
// and we handle it by creating separate range objects for each operand
// of the AND, intersecting them, and then unioning the result with the
// primary range.
leftOp = predExpr->child(0)->getOperatorType();
rightOp = predExpr->child(1)->getOperatorType();
if (leftOp == ITM_LESS || leftOp == ITM_LESS_EQ)
{
if (rightOp == ITM_GREATER || rightOp == ITM_GREATER_EQ)
{
endValue = getConstOperand(predExpr->child(0));
if (endValue)
{
startValue = getConstOperand(predExpr->child(1));
if (startValue)
addSubrange(startValue, endValue,
rightOp == ITM_GREATER_EQ,
leftOp == ITM_LESS_EQ);
else
isRange = FALSE;
}
else
isRange = FALSE;
}
else
reprocessAND = TRUE;
}
else if (leftOp == ITM_GREATER || leftOp == ITM_GREATER_EQ)
{
if (rightOp == ITM_LESS || rightOp == ITM_LESS_EQ)
{
startValue = getConstOperand(predExpr->child(0));
if (startValue)
{
endValue = getConstOperand(predExpr->child(1));
if (endValue)
addSubrange(startValue, endValue,
leftOp == ITM_GREATER_EQ,
rightOp == ITM_LESS_EQ);
else
isRange = FALSE;
}
else
isRange = FALSE;
}
else
reprocessAND = TRUE;
}
else
reprocessAND = TRUE;
// AND was used in a sense other than that of a BETWEEN predicate, so
// we must intersect the operand ranges before adding the result to the
// overall range.
if (reprocessAND)
{
OptRangeSpec *leftRange = NULL, *rightRange = NULL;
leftRange = createRangeSpec(descGenerator_, predExpr->child(0),
mvqrHeap_, forNormalizer());
if (!leftRange)
isRange = FALSE;
else if (!rangeSubjectIsSet())
setRangeSubject(leftRange);
else if (!sameRangeSubject(leftRange))
isRange = FALSE;
if (isRange)
{
rightRange = createRangeSpec(descGenerator_, predExpr->child(1),
mvqrHeap_, forNormalizer());
if (rightRange && rightRange->sameRangeSubject(leftRange))
{
leftRange->intersectRange(rightRange);
// Call only the superclass part of unionRange(); we want
// to avoid the part that modifies the originalItemExpr_;
RangeSpec::unionRange(leftRange);
}
else
isRange = FALSE;
}
delete leftRange;
delete rightRange;
}
break;
case ITM_OR:
if ( state[currIdx] == AVR_STATE0 )
{
state.insertAt( currIdx, AVR_STATE1 ) ;
currIdx++ ; //"Recurse" down to child 0
state.insertAt( currIdx, AVR_STATE0 ) ; // and start that child's state at 0
IEarray.insertAt( currIdx, predExpr->child(0) ) ;
continue ;
}
else if ( state[currIdx] == AVR_STATE1 )
{
state.insertAt( currIdx, AVR_STATE2 ) ;
currIdx++ ; //"Recurse" down to child 1
state.insertAt( currIdx, AVR_STATE0 ) ; // and start that child's state at 0
IEarray.insertAt( currIdx, predExpr->child(1) ) ;
continue ;
}
else
state.insertAt( currIdx, AVR_STATE0 ); // We are done processing predExpr
break ;
case ITM_EQUAL:
startValue = endValue = getConstOperand(predExpr);
if (startValue)
addSubrange(startValue, endValue, TRUE, TRUE);
else
isRange = FALSE;
break;
case ITM_LESS:
endValue = getConstOperand(predExpr);
if (endValue)
addSubrange(NULL, endValue, TRUE, FALSE);
else
isRange = FALSE;
break;
case ITM_LESS_EQ:
endValue = getConstOperand(predExpr);
if (endValue)
addSubrange(NULL, endValue, TRUE, TRUE);
else
isRange = FALSE;
break;
case ITM_GREATER:
startValue = getConstOperand(predExpr);
if (startValue)
addSubrange(startValue, NULL, FALSE, TRUE);
else
isRange = FALSE;
break;
case ITM_GREATER_EQ:
startValue = getConstOperand(predExpr);
if (startValue)
addSubrange(startValue, NULL, TRUE, TRUE);
else
isRange = FALSE;
break;
case ITM_NOT_EQUAL:
startValue = endValue = getConstOperand(predExpr);
if (startValue)
{
addSubrange(NULL, endValue, TRUE, FALSE);
addSubrange(startValue, NULL, FALSE, TRUE);
}
else
isRange = FALSE;
break;
case ITM_BETWEEN:
startValue = getConstOperand(predExpr);
if (startValue)
{
endValue = getConstOperand(predExpr, 2);
if (endValue)
//@ZX The Between class has private member variables
// leftBoundryIncluded_ and rightBoundryIncluded_ that have
// no accessor functions. If it is in fact possible for the
// boundaries to be noninclusive, we will need access to
// these properties.
addSubrange(startValue, endValue, TRUE, TRUE);
}
else
isRange = FALSE;
break;
case ITM_IS_NULL:
case ITM_IS_NOT_NULL:
{
ValueId colVid;
ItemExpr* child = predExpr->child(0);
if (child->getOperatorType() == ITM_VEG_REFERENCE)
{
// If this range is for an expression, the pred is not on it.
if (rangeExpr_)
isRange = FALSE;
else
{
VEG* veg = ((VEGReference*)child)->getVEG();
if (forNormalizer())
colVid = veg->getVEGReference()->getValueId();
else
colVid = getBaseCol(veg->getAllValues());
if (rangeColValueId_ == NULL_VALUE_ID)
{
rangeColValueId_ = colVid;
EqualitySet* eqSet =
descGenerator_->getEqualitySet(&rangeColValueId_);
if (eqSet)
{
rangeJoinPredId_ = (QRValueId)eqSet->getJoinPredId();
setType(eqSet->getType());
}
else
{
rangeJoinPredId_ = (QRValueId)NULL_VALUE_ID;
setType(&((ValueId)rangeColValueId_).getType());
}
}
else if (rangeColValueId_ != colVid)
isRange = FALSE;
}
}
else
{
// The left side of the pred is an expression. If this range is
// for a simple column, it doesn't match.
if (rangeColValueId_ != NULL_VALUE_ID)
isRange = FALSE;
else if (!rangeExpr_)
{
setRangeExpr(child); // sets rangeExpr_, rangeExprText_
EqualitySet* eqSet =
descGenerator_->getEqualitySet(&rangeExprText_);
if (eqSet)
{
rangeJoinPredId_ = (QRValueId)eqSet->getJoinPredId();
setType(eqSet->getType());
}
else
{
rangeJoinPredId_ = (QRValueId)NULL_VALUE_ID;
setType(&rangeExpr_->getValueId().getType());
}
}
else
{
NAString exprText;
child->unparse(exprText, OPTIMIZER_PHASE, MVINFO_FORMAT);
if (rangeExprText_ != exprText)
isRange = FALSE;
}
}
// Now that it has been validated as a range pred, take the appropriate
// action depending on whether the op is IS NULL or IS NOT NULL.
if (isRange)
{
if (op == ITM_IS_NULL)
nullIncluded_ = TRUE;
else
{
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_, op == ITM_IS_NOT_NULL,
QRLogicException,
"op must be ITM_IS_NOT_NULL here");
addSubrange(NULL, NULL, TRUE, TRUE);
}
}
}
break;
default:
isRange = FALSE;
break;
}
if ( state[currIdx] == AVR_STATE0 )
currIdx-- ; // Go back to the parent node & continue working on it.
}
return isRange;
} // buildRange()
// Local helper function to cast an Int64 (the widest integral type, which is
// used for rangespec processing) to the actual type of the column the rangespec
// applies to. numBuf is declared as Int64* to ensure proper alignment for all
// possible integral types.
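// Illustrative example: val = 1234 with scale = 2 (i.e., the value 12.34) fits
// in an Int16, so it is stored in numBuf as a 2-byte integer and typed as a
// SQLNumeric with precision 4 and scale 2.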
static void downcastRangespecInt(Int64 val, Lng32 scale, NAType*& type,
Int64* numBuf, NAMemory* heap)
{
Lng32 precision = 0; // Only used for non-integral exact numeric
// For non-integer values with leading 0's, have to adjust precision to
// equal scale. For example, the raw value 23 with a scale of 4 (.0023)
// will initially be calculated to have precision 2 instead of 4.
if (scale) // non-integral value
{
precision = (Lng32)log10(::abs((double)val)) + 1;
if (scale > precision)
precision = scale;
}
if (val <= SHRT_MAX && val >= SHRT_MIN)
{
*((Int16*)numBuf) = static_cast<Int16>(val);
if (scale == 0)
type = new(heap) SQLSmall(heap, TRUE, FALSE);
else
type = new(heap) SQLNumeric(heap, sizeof(Int16), precision, scale,
TRUE, FALSE);
}
else if (val <= INT_MAX && val >= INT_MIN)
{
*((Int32*)numBuf) = static_cast<Int32>(val);
if (scale == 0)
type = new(heap) SQLInt(heap, TRUE, FALSE);
else
type = new(heap) SQLNumeric(heap, sizeof(Int32), precision, scale,
TRUE, FALSE);
}
else
{
*numBuf = val;
if(scale == 0)
type = new(heap) SQLLargeInt(heap, TRUE, FALSE);
else
type = new(heap) SQLNumeric(heap, sizeof(Int64), precision, scale,
TRUE, FALSE);
}
}
// Instantiate a ConstValue using the actual type of the passed Int64 value.
// The actual type is indicated by the type parameter, and could be any type
// represented as an integer in a rangespec.
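// (Illustrative: the scaled integer 1234 for a NUMERIC(4,2) range column becomes
// a ConstValue whose typed value represents 12.34; a stored day count for a DATE
// column is decoded back into a date constant.)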
ConstValue* OptRangeSpec::reconstituteInt64Value(NAType* type, Int64 val) const
{
// Use these for the textual representation of a constant value, which is
// passed to the ConstValue ctor.
char constValTextBuffer[50];
NAString constValTextStr;
NABuiltInTypeEnum typeQual = type->getTypeQualifier();
switch (typeQual)
{
case NA_NUMERIC_TYPE:
{
sprintf(constValTextBuffer, PF64, val);
constValTextStr = constValTextBuffer;
NumericType* numType = static_cast<NumericType*>(type);
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
numType->isExact(),
QRLogicException,
"Expecting exact numeric type in "
"reconstituteInt64Value");
Int64 numBuf;
NAType* constType = NULL;
downcastRangespecInt(val, numType->getScale(), constType, &numBuf, mvqrHeap_);
return new(mvqrHeap_) ConstValue(constType, &numBuf,
constType->getNominalSize(),
&constValTextStr, mvqrHeap_);
}
break;
case NA_DATETIME_TYPE:
{
DatetimeType* dtType = static_cast<DatetimeType*>(type);
ULng32 tsFieldValues[DatetimeValue::N_DATETIME_FIELDS];
DatetimeValue dtv("", 0);
switch (dtType->getSubtype())
{
case DatetimeType::SUBTYPE_SQLDate:
// Since Julian timestamp is calculated from noon on the base day,
// the timestamp corresponding to a given date (without time) is
// always x.5 days, where x+1 is the actual number of days passed.
// We truncate the fractional part when we store it, so it has to
// be added back here. .5 could be added, but to be safe we add a
// full day minus a microsecond, which is then truncated to the
// proper value.
DatetimeValue::decodeTimestamp
((val+1) * SubrangeBase::MICROSECONDS_IN_DAY - 1,
dtType->getFractionPrecision(), tsFieldValues);
dtv.setValue(REC_DATE_YEAR, REC_DATE_DAY,
dtType->getFractionPrecision(), tsFieldValues);
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
dtv.isValid(), QRLogicException,
"Invalid date value reconstructed from Julian timestamp");
constValTextStr = dtv.getValueAsString(*dtType);
return new(mvqrHeap_) ConstValue(new(mvqrHeap_)SQLDate(mvqrHeap_, FALSE),
(void*)dtv.getValue(), dtv.getValueLen(),
&constValTextStr, mvqrHeap_);
break;
case DatetimeType::SUBTYPE_SQLTime:
// The fractional seconds part is represented not by a number
// of microseconds but by the numerator of the fraction having
// denominator equal to 10^fractionPrecision.
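          // E.g., 12:34:56.789 stored with fraction precision 3: the code
          // below extracts val % 1000000 = 789000 and divides by 10^(6-3),
          // yielding 789 as the FRACTION field.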
tsFieldValues[DatetimeValue::FRACTION] =
(ULng32)((val % 1000000)
/ (Int64)pow(10, 6 - dtType->getFractionPrecision()));
val /= 1000000;
tsFieldValues[DatetimeValue::SECOND] = (ULng32)(val % 60);
val /= 60;
tsFieldValues[DatetimeValue::MINUTE] = (ULng32)(val % 60);
val /= 60;
tsFieldValues[DatetimeValue::HOUR] = (ULng32)val;
dtv.setValue(REC_DATE_HOUR, REC_DATE_SECOND,
dtType->getFractionPrecision(), tsFieldValues);
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
dtv.isValid(), QRLogicException,
"Invalid time value reconstructed from Int64 value");
constValTextStr = dtv.getValueAsString(*dtType);
return new(mvqrHeap_)
ConstValue(new(mvqrHeap_)SQLTime(mvqrHeap_, FALSE,
dtType->getFractionPrecision()),
(void*)dtv.getValue(), dtv.getValueLen(),
&constValTextStr, mvqrHeap_);
break;
case DatetimeType::SUBTYPE_SQLTimestamp:
// We represent these as a number of microseconds, so fractional
// precision does not have to be taken into account.
DatetimeValue::decodeTimestamp(val,
dtType->getFractionPrecision(),
tsFieldValues);
dtv.setValue(REC_DATE_YEAR, REC_DATE_SECOND,
dtType->getFractionPrecision(), tsFieldValues);
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
dtv.isValid(), QRLogicException,
"Invalid timestamp value reconstructed from Julian timestamp");
constValTextStr = dtv.getValueAsString(*dtType);
return new(mvqrHeap_) ConstValue(new(mvqrHeap_)
SQLTimestamp(mvqrHeap_, FALSE,
dtType->getFractionPrecision()),
(void*)dtv.getValue(),
dtv.getValueLen(),
&constValTextStr, mvqrHeap_);
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_,
FALSE, QRLogicException,
"Unknown datetime subtype -- %d",
dtType->getSubtype());
return NULL;
break;
}
}
break;
case NA_INTERVAL_TYPE:
{
Int64 origVal = val; // Use this later to check for precision loss.
Int64 scaledVal; // Compare to origVal to check precision loss.
Int64 scaleFactor;
IntervalType* intvlType = static_cast<IntervalType*>(type);
// For rangespec analysis, all values are converted to months or
// microseconds. Change it back to its normal internal representation
// in terms of units of the end field.
switch (intvlType->getEndField())
{
case REC_DATE_YEAR:
val /= 12;
scaledVal = val * 12;
break;
case REC_DATE_MONTH:
// No conversion necessary.
scaledVal = val;
break;
case REC_DATE_DAY:
val /= (24LL * 60 * 60000000);
scaledVal = val * (24LL * 60 * 60000000);
break;
case REC_DATE_HOUR:
val /= (60LL * 60000000);
scaledVal = val * (60LL * 60000000);
break;
case REC_DATE_MINUTE:
val /= 60000000LL;
scaledVal = val * 60000000;
break;
case REC_DATE_SECOND:
scaleFactor = (Int64)pow(10, 6 - intvlType->getFractionPrecision());
val /= scaleFactor;
scaledVal = val * scaleFactor;
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_,
FALSE, QRLogicException,
"Invalid end field for interval type -- %d",
intvlType->getEndField());
break;
}
// Calculate the leading field precision of the interval constant, when
// converted to units of its end field.
UInt32 leadingPrec = 0;
Int64 tempVal = (val >= 0 ? val : -val); // abs has no overload for Int64
while (tempVal > 0)
{
leadingPrec++;
tempVal /= 10;
}
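      // E.g., val = 125 gives leadingPrec = 3 at this point; when the end
      // field is SECOND, any fractional-seconds digits just counted are
      // subtracted below.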
// If the field is seconds, some of the digits we just counted may be
// fractional seconds, so we subtract them from what we counted.
if (intvlType->getEndField() == REC_DATE_SECOND)
{
UInt32 fracSecPrec = intvlType->getFractionPrecision();
          // leadingPrec could be less than fracSecPrec for a fraction of a
          // second with leading zeroes.
          if (leadingPrec <= fracSecPrec)
            leadingPrec = 2;
          else
            leadingPrec -= fracSecPrec;
}
// There are cases where we can come up with leading precision of 0,
// and this causes a problem. Make it min 2, which is the default.
if (leadingPrec < 2)
leadingPrec = 2;
// Any discrepancy in precision between the range column and the
// constant value should have been dealt with when the value was
// converted to months or microseconds in getInt64ValueFromInterval().
assertLogAndThrow2(CAT_SQL_COMP_RANGE, logLevel_,
origVal == scaledVal, QRLogicException,
"Precision lost in conversion to interval type: "
"value in microseconds (or months) = %Ld, "
"interval end field = %d",
origVal, intvlType->getEndField());
// IntervalValue::setValue() will cast the Int64 value to the appropriate
// type if you pass the length of that type, but negative values seem
// to be stored incorrectly unless they are 8 bytes (see bug 2773), so
// we just use 8 bytes always.
IntervalValue intvlVal(NULL, 0);
intvlVal.setValue(val, SQL_LARGE_SIZE);
constValTextStr = intvlVal.getValueAsString(*intvlType);
// The interval type for the constant is derived from that of the column
// the predicate is on, but is always a single field (the end field of
// the column type), and has the maximum possible leading field precision
// to prevent overflow in case the value is outside the range for the
// column's declared type (can happen when the rangespec is created for
// the Normalizer rather than MVQR, because type constraints are not
// incorporated in that case). See bug 2974.
return new(mvqrHeap_) ConstValue(new(mvqrHeap_)SQLInterval
(mvqrHeap_, FALSE,
intvlType->getEndField(),
leadingPrec,
intvlType->getEndField(),
intvlType->getFractionPrecision()),
(void*)intvlVal.getValue(),
intvlVal.getValueLen(),
&constValTextStr, mvqrHeap_);
}
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_, FALSE, QRLogicException,
"Type not handled by reconstituteInt64Value() -- %d",
typeQual);
return NULL;
break;
}
// make the compiler happy
return NULL;
} // reconstituteInt64Value()
// Instantiate a ConstValue using the passed double value.
ConstValue* OptRangeSpec::reconstituteDoubleValue(NAType* type, Float64 val) const
{
NABuiltInTypeEnum typeQual = type->getTypeQualifier();
if (typeQual != NA_NUMERIC_TYPE)
{
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_, FALSE, QRLogicException,
"Type not handled by reconstituteDoubleValue() -- %d",
typeQual);
return NULL;
}
// Use these for the textual representation of a constant value, which is
// passed to the ConstValue ctor.
char constValTextBuffer[50];
NAString constValTextStr;
sprintf(constValTextBuffer, "%g", val);
constValTextStr = constValTextBuffer;
NumericType* numType = static_cast<NumericType*>(type);
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
!numType->isExact(),
QRLogicException,
"Expecting approximate numeric type in "
"reconstituteDoubleValue");
NAType* constType = new(mvqrHeap_) SQLDoublePrecision(mvqrHeap_, FALSE);
return new(mvqrHeap_) ConstValue(constType, &val, constType->getNominalSize(),
&constValTextStr, mvqrHeap_);
} // reconstituteDoubleValue()
// Generate a left-linear OR backbone of equality predicates corresponding
// to the values within the subrange.
ItemExpr* OptRangeSpec::makeSubrangeORBackbone(SubrangeBase* subrange,
ItemExpr* subrangeItem) const
{
QRTRACER("makeSubrangeOrBackbone");
NAType* type = getType()->newCopy(mvqrHeap_);
NAType* int64Type = new (mvqrHeap_) SQLLargeInt(mvqrHeap_, TRUE, FALSE );
type->resetSQLnullFlag();
type->resetSQLnullHdrSize();
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
type, QRDescriptorException,
"Call to getType() returned NULL in "
"OptRangeSpec::makeSubrangeOrBackbone().");
NABuiltInTypeEnum typeQual = type->getTypeQualifier();
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_,
typeQual == NA_DATETIME_TYPE ||
typeQual == NA_INTERVAL_TYPE ||
(typeQual == NA_NUMERIC_TYPE &&
static_cast<const NumericType*>(type)->isExact()),
QRDescriptorException,
"Invalid type for makeSubrangeOrBackbone() -- %d", typeQual);
Subrange<Int64>* intSubrange = (Subrange<Int64>*)subrange;
Int64 startVal = intSubrange->start;
Int64 endVal = intSubrange->end;
ItemExpr* top = NULL; // current top of backbone; eventual return value
ItemExpr* eqExpr; // construct each rangeitem=value pred here...
ConstValue* cv; // ...using equality to this constant
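  // E.g., an integer subrange [3..5] on column c yields the left-linear
  // backbone ((c=3 OR c=4) OR c=5).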
for (Int64 val=startVal; val<=endVal; val++)
{
cv = reconstituteInt64Value(type, val);
eqExpr = new(mvqrHeap_) BiRelat(ITM_EQUAL, subrangeItem, cv);
eqExpr->synthTypeAndValueId();
if (top)
top = new(mvqrHeap_) BiLogic(ITM_OR, top, eqExpr);
else
top = eqExpr;
}
return top;
} // makeSubrangeORBackbone
ItemExpr* OptRangeSpec::makeSubrangeItemExpr(SubrangeBase* subrange,
ItemExpr* subrangeItem) const
{
QRTRACER("makeSubrangeItemExpr");
// ItemExpr that we will build and return, representing the subrange.
ItemExpr* itemExpr;
// Nodes that the start and end values of the subrange will be attached
// to (as the 2nd child) as necessary.
ItemExpr *parentOfStart = NULL, *parentOfEnd = NULL;
// If the subrange is derived from an IN list or a disjunction of equality
// predicates, return an OR backbone of ITM_EQUALs.
if ((subrange->getSpecifiedValueCount() > 0))
return makeSubrangeORBackbone(subrange, subrangeItem);
// Build the tree, except for the ConstValue subtrees that will be attached
// later, after the underlying type of the subrange is determined.
// parentOfStart and parentOfEnd mark the nodes to attach them to.
if (subrange->isSingleValue())
{
    // A single-point subrange may be the result of intersecting two ranges,
    // which can leave it with adjustment flags adopted from another subrange.
    // They aren't relevant when the subrange represents a single value, and
    // will distort the value used if left in place.
subrange->setStartAdjustment(0);
subrange->setEndAdjustment(0);
itemExpr = parentOfStart
= new(mvqrHeap_) BiRelat(ITM_EQUAL, subrangeItem);
}
else if (subrange->endIsMax())
if (subrange->startInclusive() && subrange->getStartAdjustment() == 0)
itemExpr = parentOfStart
= new(mvqrHeap_) BiRelat(ITM_GREATER_EQ, subrangeItem);
else
itemExpr = parentOfStart
= new(mvqrHeap_) BiRelat(ITM_GREATER, subrangeItem);
else if (subrange->startIsMin())
if (subrange->endInclusive() && subrange->getEndAdjustment() == 0)
itemExpr = parentOfEnd
= new(mvqrHeap_) BiRelat(ITM_LESS_EQ, subrangeItem);
else
itemExpr = parentOfEnd
= new(mvqrHeap_) BiRelat(ITM_LESS, subrangeItem);
else
{
itemExpr = new(mvqrHeap_) BiLogic(ITM_AND);
if (subrange->startInclusive() && subrange->getStartAdjustment() == 0)
itemExpr->child(0) = parentOfStart
= new(mvqrHeap_) BiRelat(ITM_GREATER_EQ, subrangeItem);
else
itemExpr->child(0) = parentOfStart
= new(mvqrHeap_) BiRelat(ITM_GREATER, subrangeItem);
if (subrange->endInclusive() && subrange->getEndAdjustment() == 0)
itemExpr->child(1) = parentOfEnd
= new(mvqrHeap_) BiRelat(ITM_LESS_EQ, subrangeItem);
else
itemExpr->child(1) = parentOfEnd
= new(mvqrHeap_) BiRelat(ITM_LESS, subrangeItem);
}
// Now the item expression is complete except for filling in the constants
// at the appropriate locations, denoted by the parentOfStart and parentOfEnd
// ItemExpr ptrs, each of which is null if not applicable (e.g., an unbounded
// range will only use either the start or end value, not both).
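  // For example, the inclusive subrange [1..10] yields
  // (subject >= 1 AND subject <= 10), with the constants 1 and 10 attached
  // below as the second child of parentOfStart and parentOfEnd respectively.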
NAType* type = getType()->newCopy(mvqrHeap_);
type->resetSQLnullFlag();
type->resetSQLnullHdrSize();
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
type, QRDescriptorException,
"Call to getType() returned NULL in "
"OptRangeSpec::makeSubrangeItemExpr().");
NABuiltInTypeEnum typeQual = type->getTypeQualifier();
switch (typeQual)
{
case NA_NUMERIC_TYPE:
case NA_DATETIME_TYPE:
case NA_INTERVAL_TYPE:
if (typeQual == NA_DATETIME_TYPE ||
typeQual == NA_INTERVAL_TYPE ||
(typeQual == NA_NUMERIC_TYPE &&
static_cast<const NumericType*>(type)->isExact()))
{
Subrange<Int64>* intSubrange = (Subrange<Int64>*)subrange;
Int64 startVal = intSubrange->start - intSubrange->getStartAdjustment();
Int64 endVal = intSubrange->end + intSubrange->getEndAdjustment();
if (parentOfStart)
parentOfStart->child(1) = reconstituteInt64Value(type, startVal);
if (parentOfEnd)
parentOfEnd->child(1) = reconstituteInt64Value(type, endVal);
}
else
{
Subrange<double>* dblSubrange = (Subrange<double>*)subrange;
double startVal = dblSubrange->start;
double endVal = dblSubrange->end;
if (parentOfStart)
parentOfStart->child(1) = reconstituteDoubleValue(type, startVal);
if (parentOfEnd)
parentOfEnd->child(1) = reconstituteDoubleValue(type, endVal);
}
break;
case NA_CHARACTER_TYPE:
{
// Alignment is 2 for UCS2, 1 for single-byte char set.
if (type->getDataAlignment() == 2)
{
// Unicode string.
Subrange<RangeWString>* wcharSubrange =
(Subrange<RangeWString>*)subrange;
if (parentOfStart)
parentOfStart->child(1) =
new(mvqrHeap_) ConstValue(wcharSubrange->start);
if (parentOfEnd)
parentOfEnd->child(1) =
new(mvqrHeap_) ConstValue(wcharSubrange->end);
}
else
{
// Latin1 string.
Subrange<RangeString>* charSubrange
= (Subrange<RangeString>*)subrange;
if (parentOfStart)
parentOfStart->child(1) =
new(mvqrHeap_) ConstValue(charSubrange->start, ((CharType *)type)->getCharSet() );
if (parentOfEnd)
parentOfEnd->child(1) =
new(mvqrHeap_) ConstValue(charSubrange->end, ((CharType *)type)->getCharSet() );
}
}
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, logLevel_,
FALSE, QRDescriptorException,
"Unhandled data type in "
"OptRangeSpec::makeSubrangeItemExpr: %d",
typeQual);
break;
}
// This NAType object is never used in the creation of a ConstValue.
delete type;
return itemExpr;
} // makeSubrangeItemExpr()
void OptRangeSpec::intersectRange(OptRangeSpec* other)
{
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
sameRangeSubject(other) || !other->rangeSubjectIsSet(),
QRDescriptorException,
"Intersecting with range on a different col/expr, or range "
"subject is not set");
// Call the superclass version to intersect the ranges, and mark this range
// spec as being an intersection. The isIntersection_ flag will cause the
// itemexpr representing the overall range to be created, so its value id
// can be used as the id attribute of the RangePred element, instead of that
// of the initial predicate on the range column/expr. This is critical,
// because the predicate corresponding to that id is the one used to carry
// out a NotProvided rewrite instruction for a range pred.
RangeSpec::intersectRange(other);
isIntersection_ = TRUE;
}
void OptRangeSpec::unionRange(OptRangeSpec* other)
{
assertLogAndThrow(CAT_SQL_COMP_RANGE, logLevel_,
sameRangeSubject(other) || !other->rangeSubjectIsSet(),
QRDescriptorException,
"Unioning with range on a different col/expr, or range "
"subject is not set");
// Call the superclass version to union the ranges.
RangeSpec::unionRange(other);
}
// normWA is not used here. It is a parameter because the OptNormRangeSpec
// redefinition of this virtual function needs it.
ItemExpr* OptRangeSpec::getRangeItemExpr(NormWA* normWA) const
{
QRTRACER("OptRangeSpec::getRangeItemExpr");
// ItemExpr representing the application of the range condition to the
// subject column or expression.
ItemExpr* rangeItemExpr = NULL;
// If there are no values in the range, the predicate can't be satisfied;
// return a FALSE itemexpr.
if (!subranges_.entries() && !nullIncluded_)
rangeItemExpr = new(mvqrHeap_) BoolVal(ITM_RETURN_FALSE);
// If all values are covered by the range, the underlying predicate is
// necessarily true; return a TRUE itemexpr.
if (coversFullRange())
rangeItemExpr = new(mvqrHeap_) BoolVal(ITM_RETURN_TRUE);
  // coversFullRange() is false, but we may still have a full range [-inf..+inf]
  // except for NULL values. In other words, the predicate 'a > -1 OR a < 1' is
  // converted to 'a IS NOT NULL'.
if ((rangeItemExpr == NULL) && ((subranges_.entries() == 1) &&
subranges_[0]->coversFullRange()))
rangeItemExpr = new(mvqrHeap_) UnLogic(ITM_IS_NOT_NULL, getRangeExpr());
if (rangeItemExpr == NULL)
{
// ItemExpr representing the column or expression the range applies to.
ItemExpr* subjectItemExpr = getRangeExpr();
// Create a left-deep OR backbone joining the conditions representing the
// subranges of this range.
SubrangeBase* subrange = NULL;
for (CollIndex i=0; i < subranges_.entries(); i++)
{
subrange = subranges_[i];
if (!rangeItemExpr)
rangeItemExpr = makeSubrangeItemExpr(subrange, subjectItemExpr);
else
rangeItemExpr =
new(mvqrHeap_) BiLogic(ITM_OR, rangeItemExpr,
makeSubrangeItemExpr(subrange,
subjectItemExpr));
}
// If the range permits the null value, OR in an isNull predicate.
if (nullIncluded_)
{
ItemExpr* isNullItemExpr = new(mvqrHeap_) UnLogic(ITM_IS_NULL,
subjectItemExpr);
if (!rangeItemExpr)
rangeItemExpr = isNullItemExpr;
else
rangeItemExpr = new(mvqrHeap_) BiLogic(ITM_OR,
rangeItemExpr,
isNullItemExpr);
}
}
rangeItemExpr->synthTypeAndValueId();
rangeItemExpr->setRangespecItemExpr(TRUE);
return rangeItemExpr;
} // OptRangeSpec::getRangeItemExpr()
ItemExpr* OptNormRangeSpec::getRangeItemExpr(NormWA* normWA) const
{
QRTRACER("OptNormRangeSpec::getRangeItemExpr");
// Call the superclass version to do the primary work of producing an
// ItemExpr from the range specification.
ItemExpr* rangeItemExpr = OptRangeSpec::getRangeItemExpr();
// Copy LIKE pred info from original expr.
OperatorTypeEnum op = rangeItemExpr->getOperatorType();
if ((op == ITM_AND)||(op == ITM_OR))
{
op = rangeItemExpr->child(0)->getOperatorType();
if ( (op == ITM_GREATER_EQ) ||(op == ITM_GREATER) ||
(op == ITM_LESS) ||(op == ITM_LESS_EQ))
{
BiRelat *br = (BiRelat *) rangeItemExpr->child(0).getPtr();
OperatorTypeEnum op1 = getOriginalItemExpr()->child(0)->getOperatorType();
if( (op1 == ITM_GREATER_EQ) || (op1 == ITM_GREATER) ||
(op1 == ITM_LESS) ||(op1 == ITM_LESS_EQ))
{
BiRelat *br1 = (BiRelat *) getOriginalItemExpr()->child(0).getPtr();
br->setAddedForLikePred(br1->addedForLikePred());
br->setOriginalLikeExprId(br1->originalLikeExprId());
br->setLikeSelectivity(br1->getLikeSelectivity());
if(br1->isSelectivitySetUsingHint())
{
br->setSelectivitySetUsingHint();
br->setSelectivityFactor(br1->getSelectivityFactor());
}
}
}
op = rangeItemExpr->child(1)->getOperatorType();
if ( (op == ITM_GREATER_EQ) ||(op == ITM_GREATER) ||
(op == ITM_LESS) ||(op == ITM_LESS_EQ))
{
BiRelat *br = (BiRelat *) rangeItemExpr->child(1).getPtr();
OperatorTypeEnum op1 = getOriginalItemExpr()->child(1)->getOperatorType();
if( (op1 == ITM_GREATER_EQ) || (op1 == ITM_GREATER) ||
(op1 == ITM_LESS) ||(op1 == ITM_LESS_EQ))
{
BiRelat *br1 = (BiRelat *) getOriginalItemExpr()->child(1).getPtr();
br->setAddedForLikePred(br1->addedForLikePred());
br->setOriginalLikeExprId(br1->originalLikeExprId());
br->setLikeSelectivity(br1->getLikeSelectivity());
if(br1->isSelectivitySetUsingHint())
{
br->setSelectivitySetUsingHint();
br->setSelectivityFactor(br1->getSelectivityFactor());
}
}
}
}
else if ( (op == ITM_GREATER_EQ) ||(op == ITM_GREATER) ||
(op == ITM_LESS) ||(op == ITM_LESS_EQ))
{
BiRelat *br = (BiRelat *) rangeItemExpr;
OperatorTypeEnum op1 = getOriginalItemExpr()->getOperatorType();
if( (op1 == ITM_GREATER_EQ) || (op1 == ITM_GREATER) ||
(op1 == ITM_LESS) ||(op1 == ITM_LESS_EQ))
{
BiRelat *br1 = (BiRelat *) getOriginalItemExpr();
br->setAddedForLikePred(br1->addedForLikePred());
br->setOriginalLikeExprId(br1->originalLikeExprId());
br->setLikeSelectivity(br1->getLikeSelectivity());
if(br1->isSelectivitySetUsingHint())
{
br->setSelectivitySetUsingHint();
br->setSelectivityFactor(br1->getSelectivityFactor());
}
}
}
if(((ItemExpr *)getOriginalItemExpr())->isSelectivitySetUsingHint())
{
rangeItemExpr->setSelectivitySetUsingHint();
rangeItemExpr->setSelectivityFactor(getOriginalItemExpr()->getSelectivityFactor());
}
// Now associate the range spec's itemexpr with the value id of the
// original expression.
ValueId vid = getOriginalItemExpr()->getValueId();
vid.replaceItemExpr(rangeItemExpr);
if (normWA)
rangeItemExpr->normalizeNode(*normWA);
return rangeItemExpr;
}
void OptNormRangeSpec::intersectRange(OptNormRangeSpec* other)
{
// Build the item expression tree corresponding to the combined range.
ItemExpr* otherItemExpr = const_cast<ItemExpr*>(other->getOriginalItemExpr());
if (originalItemExpr_ && otherItemExpr)
{
originalItemExpr_ = new(mvqrHeap_) BiLogic(ITM_AND,
originalItemExpr_,
otherItemExpr);
originalItemExpr_->synthTypeAndValueId(TRUE);
}
else
{
if (!originalItemExpr_)
{
QRLogger::log(CAT_SQL_COMP_RANGE, logLevel_,
"OptNormRangeSpec::intersectRange -- originalItemExpr_ is NULL.");
}
if (!otherItemExpr)
{
QRLogger::log(CAT_SQL_COMP_RANGE, logLevel_,
"OptNormRangeSpec::intersectRange -- other op has NULL ItemExpr.");
}
}
// Now call the superclass version to intersect the ranges.
OptRangeSpec::intersectRange(other);
}
void OptNormRangeSpec::unionRange(OptNormRangeSpec* other)
{
// Build the item expression tree corresponding to the combined range.
ItemExpr* otherItemExpr = const_cast<ItemExpr*>(other->getOriginalItemExpr());
if (originalItemExpr_ && otherItemExpr)
{
originalItemExpr_ = new(mvqrHeap_) BiLogic(ITM_OR,
originalItemExpr_,
otherItemExpr);
originalItemExpr_->synthTypeAndValueId(TRUE);
}
else
{
if (!originalItemExpr_)
{
QRLogger::log(CAT_SQL_COMP_RANGE, logLevel_,
"OptNormRangeSpec::unionRange -- originalItemExpr_ is NULL.");
}
if (!otherItemExpr)
{
QRLogger::log(CAT_SQL_COMP_RANGE, logLevel_,
"OptNormRangeSpec::unionRange -- other op has NULL ItemExpr.");
}
}
// Now call the superclass version to union the ranges.
OptRangeSpec::unionRange(other);
}
// Convert a date, time, or timestamp value to an integral form for use with
// range specifications.
static Int64 getInt64ValueFromDateTime(ConstValue* val,
const DatetimeType* rangeColType,
const DatetimeType* constType,
NABoolean& truncated,
logLevel level)
{
char dateRep[11];
Int64 i64val;
char* valPtr = (char*)val->getConstValue() +
val->getType()->getSQLnullHdrSize();
Lng32 rangeColFracSecPrec = rangeColType->getFractionPrecision();
Lng32 constValueFracSecPrec = constType->getFractionPrecision();
Lng32 fracSec;
truncated = FALSE;
switch (constType->getSubtype())
{
case DatetimeType::SUBTYPE_SQLDate:
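      // dateRep holds the 2-byte year followed by the 1-byte month and day;
      // the remaining 7 (time) bytes are zeroed. The resulting Julian
      // timestamp in microseconds is then scaled down to a whole-day count.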
memcpy(dateRep, valPtr, 4);
memset(dateRep+4, 0, 7);
i64val = DatetimeType::julianTimestampValue(dateRep, 11, 0)
/ SubrangeBase::MICROSECONDS_IN_DAY;
break;
case DatetimeType::SUBTYPE_SQLTime:
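      // Hour, minute, and second are stored as single bytes; combine them
      // into a number of seconds, then scale to microseconds.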
i64val = *valPtr * 3600 + *(valPtr+1) * 60 + *(valPtr+2);
i64val *= 1000000; // in microseconds
if (constValueFracSecPrec)
{
memcpy(&fracSec, valPtr+3, 4);
i64val += fracSec * (Int64)pow(10, 6-constValueFracSecPrec);
}
break;
case DatetimeType::SUBTYPE_SQLTimestamp:
i64val = DatetimeType::julianTimestampValue(valPtr,
constValueFracSecPrec ? 11 : 7,
constValueFracSecPrec);
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
FALSE, QRDescriptorException,
"Invalid datetime subtype -- %d", constType->getSubtype());
}
  // For time or timestamp (if date, both compared values will be 0), truncate
  // any extra (wrt the col type) fractional precision from the value and mark
  // it as truncated. The caller will use this to determine if the comparison
  // operator with the truncated value needs to be changed (e.g.,
  // t<time'12:00:00.4' <--> t<=time'12:00:00', if t is time(0)).
if (constValueFracSecPrec > rangeColFracSecPrec)
{
Int64 scaleFactor = (Int64)pow(10, 6-rangeColFracSecPrec);
Int64 unscaledI64Val = i64val;
i64val = i64val / scaleFactor * scaleFactor;
truncated = (i64val != unscaledI64Val);
}
return i64val;
}
// Return an interval value as an Int64 for use with range specifications.
// An interval value of either type (year-month or day-time) will be calculated
// in terms of the least significant field of that type, regardless of the
// actual fields used in the declaration. For example, an interval specified as
// HOUR TO MINUTE will return a value which is a number of microseconds. This
// facilitates comparing intervals of different granularity.
//
// A day-time interval that ends in seconds may specify a fractional precision
// less than the default (e.g., Interval '5.1' Second(2,2)), in which case its
// raw value is in units of a lower precision than microseconds (hundredths of
// a second in the example). We scale such values to microseconds so that
// everything is comparable. In the example above, the value used would be
// 5100000 instead of the original internal integral value of 510.
static Int64 getInt64ValueFromInterval(ConstValue* constVal,
const IntervalType* colIntvlType,
NABoolean& truncated,
NABoolean& valWasNegative,
logLevel level)
{
const IntervalType* constIntvlType =
static_cast<const IntervalType*>(constVal->getType());
Int64 i64val;
char* val = (char*)constVal->getConstValue();
Lng32 storageSize = constVal->getStorageSize();
Lng32 nullHdrSize = constVal->getType()->getSQLnullHdrSize();
val += nullHdrSize;
storageSize -= nullHdrSize;
switch (storageSize)
{
// No need to check for signed/unsigned before checking for negative;
// interval type is always signed.
case 2:
{
Int16 valx;
memcpy(&valx, val, sizeof(Int16));
valWasNegative = (valx < 0);
i64val = valx;
}
break;
case 4:
{
Lng32 valx;
memcpy(&valx, val, sizeof(Lng32));
valWasNegative = (valx < 0);
i64val = valx;
}
break;
case 8:
memcpy(&i64val, val, sizeof(i64val));
valWasNegative = (i64val < 0);
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
FALSE, QRDescriptorException,
"Invalid interval storage length -- %d",
storageSize);
break;
}
// Now cast the value in terms of the least significant field for the constant's
// interval type (months for year-month, microseconds for day-time), which may
// have different fields than those of the column's type.
switch (constIntvlType->getEndField())
{
case REC_DATE_YEAR:
// Multiply by number of months in a year
i64val *= 12;
break;
case REC_DATE_MONTH:
// Value is already in correct units (months).
break;
case REC_DATE_DAY:
// Multiply by number of microseconds in a day.
i64val *= (24LL * 60 * 60000000);
break;
case REC_DATE_HOUR:
// Multiply by number of microseconds in an hour.
i64val *= (60LL * 60000000);
break;
case REC_DATE_MINUTE:
// Multiply by number of microseconds in a minute.
i64val *= 60000000;
break;
case REC_DATE_SECOND:
// We want all intervals with seconds as the end field to be represented
// as a number of microseconds to ensure comparability, so if the
// fractional precision is less than the default (6), scale it to
// microseconds.
i64val *= (Int64)pow(10, 6 - constIntvlType->getFractionPrecision());
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
FALSE, QRDescriptorException,
"Invalid end field for interval -- %d",
constIntvlType->getEndField());
break;
}
// Now discard any part of the constant value owing to extra precision in the
// constant's type. For instance, if the column type is INTERVAL YEAR and the
// constant is "interval'2-7'year to month", the constant is truncated to 2
// years. We remember that this truncation occurred, as it may affect the
// comparison operator used in the rangespec.
Int64 scaleFactor;
Int64 origI64Val = i64val;
switch (colIntvlType->getEndField())
{
    // Do integer division to scrape off any excess precision in the
    // constant value, then multiply by the same factor to restore it as a
    // number of months (year-month interval) or microseconds (day-time).
case REC_DATE_YEAR:
// Divide/multiply by number of months in a year
i64val = i64val / 12 * 12;
break;
case REC_DATE_MONTH:
// No possibility of extra precision for the constant value.
break;
case REC_DATE_DAY:
// Divide/multiply by number of microseconds in a day.
i64val = i64val / (24LL * 60 * 60000000) * (24LL * 60 * 60000000);
break;
case REC_DATE_HOUR:
// Divide/multiply by number of microseconds in an hour.
i64val = i64val / (60LL * 60000000) * (60LL * 60000000);
break;
case REC_DATE_MINUTE:
// Divide/multiply by number of microseconds in a minute.
i64val = i64val / 60000000 * 60000000;
break;
case REC_DATE_SECOND:
// Divide/multiply by 10 ** <unused frac secs>.
scaleFactor = (Int64)pow(10, 6 - colIntvlType->getFractionPrecision());
i64val = i64val / scaleFactor * scaleFactor;
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
FALSE, QRDescriptorException,
"Invalid end field for interval -- %d",
colIntvlType->getEndField());
break;
}
truncated = (origI64Val != i64val);
return i64val;
}
Int64 getInt64Value(ConstValue* val, const NAType* rangeColType,
NABoolean& truncated, NABoolean& valWasNegative,
logLevel level)
{
truncated = FALSE;
valWasNegative = FALSE;
// If the constant value is a date, time, timestamp, or interval, pass it on
// to the correct function for handling that type.
const NAType* constValType = val->getType();
if (constValType->getTypeQualifier() == NA_DATETIME_TYPE)
return getInt64ValueFromDateTime(val,
static_cast<const DatetimeType*>(rangeColType),
static_cast<const DatetimeType*>(constValType),
truncated, level);
else if (constValType->getTypeQualifier() == NA_INTERVAL_TYPE)
return getInt64ValueFromInterval(val,
static_cast<const IntervalType*>(rangeColType),
truncated, valWasNegative, level);
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
constValType->getTypeQualifier() == NA_NUMERIC_TYPE,
QRDescriptorException,
"Unexpected date type -- %d",
constValType->getTypeQualifier());
const NumericType* constValNumType = static_cast<const NumericType*>(constValType);
Lng32 rangeColumnScale = rangeColType->getScale();
NABoolean isExactNumeric = val->isExactNumeric();
Lng32 constValueScale = (isExactNumeric ? constValNumType->getScale() : 0);
char* valuePtr = (char*)val->getConstValue();
Lng32 storageSize = val->getStorageSize();
Lng32 nullHdrSize = constValNumType->getSQLnullHdrSize();
valuePtr += nullHdrSize;
storageSize -= nullHdrSize;
// Scale factor for exact numeric constant values.
Int64 scaleFactor = 1;
if (constValueScale < rangeColumnScale)
scaleFactor = (Int64)pow(10, rangeColumnScale - constValueScale);
// Scale factor for approximate (floating-point) constant values.
double dblScaleFactor = pow(10, rangeColumnScale);
Int64 i64val = 0;
Float64 flt64val = 0;
if (constValNumType->isDecimal())
{
// Decode the decimal value from the string of digits. The digit values
// are the lower nibble of each byte. As we go from left to right,
    // multiply the accumulated value by 10, and add the value of the next
// digit. The value is negative if the upper nibble of the first byte is
// 11 (0xB0 after masking off lower nibble).
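    // E.g., the bytes 0xB1 0x02 0x03 decode digit-by-digit to 123, and the
    // upper nibble 0xB of the first byte marks the value as negative: -123.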
char* p = valuePtr;
for (Lng32 i=0; i<storageSize; i++)
(i64val *= 10) += (*p++ & 0x0F);
if ((*valuePtr & 0xF0) == 0xB0)
{
i64val *= -1;
valWasNegative = TRUE;
}
}
else
switch (storageSize)
{
case 1:
{
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
isExactNumeric, QRDescriptorException,
"Constant value of size 1 not exact numeric, type is: %d",
constValNumType->getTypeQualifier());
Int8 i8val;
memcpy(&i8val, valuePtr, storageSize);
if (constValNumType->isSigned())
{
valWasNegative = (i8val < 0);
i64val = (Int64)(i8val * scaleFactor);
}
else
i64val = (Int64)((UInt8)i8val * scaleFactor);
}
break;
case 2:
{
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
isExactNumeric, QRDescriptorException,
"Constant value of size 2 not exact numeric, type is: %d",
constValNumType->getTypeQualifier());
Int16 i16val;
memcpy(&i16val, valuePtr, storageSize);
if (constValNumType->isSigned())
{
valWasNegative = (i16val < 0);
i64val = (Int64)(i16val * scaleFactor);
}
else
i64val = (Int64)((UInt16)i16val * scaleFactor);
}
break;
case 4:
if (isExactNumeric)
{
Lng32 i32val;
memcpy(&i32val, valuePtr, storageSize);
if (constValNumType->isSigned())
{
valWasNegative = (i32val < 0);
i64val = (Int64)(i32val * scaleFactor);
}
else
i64val = (Int64)((UInt32)i32val * scaleFactor);
}
else
{
Float32 flt32val;
memcpy(&flt32val, valuePtr, storageSize);
valWasNegative = (flt32val < 0);
flt64val = flt32val * dblScaleFactor;
if (flt64val < LLONG_MIN)
i64val = LLONG_MIN;
else if (flt64val > LLONG_MAX)
i64val = LLONG_MAX;
else
i64val = (Int64)flt64val;
}
break;
case 8:
if (isExactNumeric)
{
memcpy(&i64val, valuePtr, storageSize);
valWasNegative = (i64val < 0);
i64val *= scaleFactor;
}
else
{
memcpy(&flt64val, valuePtr, storageSize);
valWasNegative = (flt64val < 0);
flt64val *= dblScaleFactor;
if (flt64val < LLONG_MIN)
i64val = LLONG_MIN;
else if (flt64val > LLONG_MAX)
i64val = LLONG_MAX;
else
i64val = (Int64)flt64val;
}
break;
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
FALSE, QRDescriptorException,
"Numeric constant of unexpected size: %d bytes",
storageSize);
return 0; // avoid warning
}
// If the constant has more digits to the right of the decimal point than the
// range column, note it as truncated if any of the excess digits is nonzero.
// The caller will use this to determine if the comparison operator with the
// truncated value needs to be changed (e.g., i>4.3 <--> i>=4, if scale of i
// is 0).
if (isExactNumeric)
{
if (constValueScale > rangeColumnScale)
{
scaleFactor = (Int64)pow(10, constValueScale - rangeColumnScale);
truncated = (i64val / scaleFactor * scaleFactor != i64val);
return i64val / scaleFactor;
}
else
return i64val;
}
else
{
truncated = (i64val != flt64val);
return i64val;
}
}
double getDoubleValue(ConstValue* val, logLevel level)
{
  // If the constant is a SystemLiteral (typically produced by the cast of a
  // constant value), the value pointer and storage size will include a null
// indicator header field that we have to take into account when accessing
// the value. It also means the actual value field may not be aligned
// properly, so we need to memcpy the value representation into a variable
// of the appropriate type.
const NAType* constValType = val->getType();
Lng32 nullHdrSize = constValType->getSQLnullHdrSize();
Lng32 valueStorageSize = val->getStorageSize() - nullHdrSize;
void* valuePtr = (char*)val->getConstValue() + nullHdrSize;
NABoolean isExactNumeric = val->isExactNumeric();
double scaleDivisor = pow(10, isExactNumeric ? constValType->getScale() : 0);
if (static_cast<const NumericType*>(constValType)->isDecimal())
{
// Decode the decimal value from the string of digits. The digit values
// are the lower nibble of each byte. As we go from left to right,
    // multiply the accumulated value by 10, and add the value of the next
// digit. The value is negative if the upper nibble of the first byte is
// 11 (0xB0 after masking off lower nibble).
char* p = (char*)valuePtr;
Int64 i64val = 0;
for (Lng32 i=0; i<valueStorageSize; i++)
(i64val *= 10) += (*p++ & 0x0F);
if ((*(char*)valuePtr & 0xF0) == 0xB0)
i64val *= -1;
return i64val / scaleDivisor;
}
switch (valueStorageSize)
{
case 1: // tinyint
{
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
isExactNumeric, QRDescriptorException,
"const value of size 1 not exact numeric: %d",
constValType->getTypeQualifier());
Int8 i8val;
memcpy(&i8val, valuePtr, valueStorageSize);
return i8val / scaleDivisor;
}
case 2: // smallint
{
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
isExactNumeric, QRDescriptorException,
"const value of size 2 not exact numeric: %d",
constValType->getTypeQualifier());
Int16 i16val;
memcpy(&i16val, valuePtr, valueStorageSize);
return i16val / scaleDivisor;
}
case 4: // int
if (isExactNumeric)
{
Lng32 i32val;
memcpy(&i32val, valuePtr, valueStorageSize);
return i32val / scaleDivisor;
}
else
{
Float32 fltval;
memcpy(&fltval, valuePtr, valueStorageSize);
return fltval;
}
case 8: // largeint
if (isExactNumeric)
{
// possible loss of data
Int64 i64val;
memcpy(&i64val, valuePtr, valueStorageSize);
return (double)(i64val / scaleDivisor);
}
else
{
Float64 dblval;
memcpy(&dblval, valuePtr, valueStorageSize);
return dblval;
}
default:
assertLogAndThrow1(CAT_SQL_COMP_RANGE, level,
FALSE, QRDescriptorException,
"Numeric constant of unexpected size: %d bytes",
val->getStorageSize());
return 0; // avoid warning
}
}
NABoolean RangeInfo::operator==(const RangeInfo& other)
{
if (rangeSpec_->getRangeJoinPredId() != NULL_VALUE_ID)
return rangeSpec_->getRangeJoinPredId() ==
other.rangeSpec_->getRangeJoinPredId();
else if (rangeSpec_->getRangeColValueId() != NULL_VALUE_ID)
return rangeSpec_->getRangeColValueId() ==
other.rangeSpec_->getRangeColValueId();
else
return rangeSpec_->getRangeExprText()==other.rangeSpec_->getRangeExprText();
}
| 1 | 18,790 | This adds a case-insensitive flag to the type in the RangeSpec, but I don't think RangeSpecs are written to handle case-insensitive comparisons. Take a look at the methods that deal with comparisons when building RangeSpecs, in file Range.cpp. So, I think you would have to do one of two things: a) disable the RangeSpec transformation for case-insensitive comparison operators (the easy way) or b) change the RangeSpec methods to handle case-insensitive comparisons. | apache-trafodion | cpp |
@@ -637,6 +637,16 @@ public class MethodResolutionLogic {
throw new UnsupportedOperationException(typeDeclaration.getClass().getCanonicalName());
}
+ public static SymbolReference<ResolvedMethodDeclaration> solveMethodInFQN(String fqn, String name,
+ List<ResolvedType> argumentsTypes, boolean staticOnly, TypeSolver typeSolver) {
+ SymbolReference<ResolvedReferenceTypeDeclaration> typeRef = typeSolver.tryToSolveType(fqn);
+ if (typeRef.isSolved()) {
+ return solveMethodInType(typeRef.getCorrespondingDeclaration(), name, argumentsTypes,
+ staticOnly, typeSolver);
+ }
+ return SymbolReference.unsolved(ResolvedMethodDeclaration.class);
+ }
+
private static void inferTypes(ResolvedType source, ResolvedType target, Map<ResolvedTypeParameterDeclaration, ResolvedType> mappings) {
| 1 | /*
* Copyright 2016 Federico Tomassetti
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.javaparser.symbolsolver.resolution;
import com.github.javaparser.resolution.MethodAmbiguityException;
import com.github.javaparser.resolution.MethodUsage;
import com.github.javaparser.resolution.declarations.*;
import com.github.javaparser.resolution.types.*;
import com.github.javaparser.symbolsolver.core.resolution.Context;
import com.github.javaparser.symbolsolver.javaparsermodel.declarations.JavaParserAnonymousClassDeclaration;
import com.github.javaparser.symbolsolver.javaparsermodel.declarations.JavaParserClassDeclaration;
import com.github.javaparser.symbolsolver.javaparsermodel.declarations.JavaParserEnumDeclaration;
import com.github.javaparser.symbolsolver.javaparsermodel.declarations.JavaParserInterfaceDeclaration;
import com.github.javaparser.symbolsolver.javaparsermodel.declarations.JavaParserMethodDeclaration;
import com.github.javaparser.symbolsolver.javassistmodel.JavassistClassDeclaration;
import com.github.javaparser.symbolsolver.javassistmodel.JavassistEnumDeclaration;
import com.github.javaparser.symbolsolver.javassistmodel.JavassistInterfaceDeclaration;
import com.github.javaparser.symbolsolver.model.resolution.SymbolReference;
import com.github.javaparser.symbolsolver.model.resolution.TypeSolver;
import com.github.javaparser.symbolsolver.model.typesystem.*;
import com.github.javaparser.symbolsolver.reflectionmodel.ReflectionClassDeclaration;
import com.github.javaparser.symbolsolver.reflectionmodel.ReflectionEnumDeclaration;
import com.github.javaparser.symbolsolver.reflectionmodel.ReflectionInterfaceDeclaration;
import java.util.*;
import java.util.stream.Collectors;
/**
* @author Federico Tomassetti
*/
public class MethodResolutionLogic {
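    // Collapses the trailing argument types that correspond to a variadic
    // parameter into a single array type. E.g. for f(int, String...) invoked
    // as f(1, "a", "b"), the list [int, String, String] becomes [int, String[]].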
private static List<ResolvedType> groupVariadicParamValues(List<ResolvedType> argumentsTypes, int startVariadic, ResolvedType variadicType) {
List<ResolvedType> res = new ArrayList<>(argumentsTypes.subList(0, startVariadic));
List<ResolvedType> variadicValues = argumentsTypes.subList(startVariadic, argumentsTypes.size());
if (variadicValues.isEmpty()) {
// TODO if there are no variadic values we should default to the bound of the formal type
res.add(variadicType);
} else {
ResolvedType componentType = findCommonType(variadicValues);
res.add(new ResolvedArrayType(componentType));
}
return res;
}
private static ResolvedType findCommonType(List<ResolvedType> variadicValues) {
if (variadicValues.isEmpty()) {
throw new IllegalArgumentException();
}
// TODO implement this decently
return variadicValues.get(0);
}
public static boolean isApplicable(ResolvedMethodDeclaration method, String name, List<ResolvedType> argumentsTypes, TypeSolver typeSolver) {
return isApplicable(method, name, argumentsTypes, typeSolver, false);
}
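    // The wildcard-tolerance flag below lets an argument whose type is a
    // wildcard match any non-primitive parameter type. findMostApplicable()
    // runs a strict pass first and retries with tolerance only when nothing
    // matched; in the tolerant pass a method qualifies only if the tolerance
    // was actually needed.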
private static boolean isApplicable(ResolvedMethodDeclaration method, String name, List<ResolvedType> argumentsTypes, TypeSolver typeSolver, boolean withWildcardTolerance) {
if (!method.getName().equals(name)) {
return false;
}
if (method.hasVariadicParameter()) {
int pos = method.getNumberOfParams() - 1;
if (method.getNumberOfParams() == argumentsTypes.size()) {
// check if the last value is directly assignable as an array
ResolvedType expectedType = method.getLastParam().getType();
ResolvedType actualType = argumentsTypes.get(pos);
if (!expectedType.isAssignableBy(actualType)) {
for (ResolvedTypeParameterDeclaration tp : method.getTypeParameters()) {
expectedType = replaceTypeParam(expectedType, tp, typeSolver);
}
if (!expectedType.isAssignableBy(actualType)) {
if (actualType.isArray() && expectedType.isAssignableBy(actualType.asArrayType().getComponentType())) {
argumentsTypes.set(pos, actualType.asArrayType().getComponentType());
} else {
argumentsTypes = groupVariadicParamValues(argumentsTypes, pos, method.getLastParam().getType());
}
}
} // else it is already assignable, nothing to do
} else {
if (pos > argumentsTypes.size()) {
return false;
}
argumentsTypes = groupVariadicParamValues(argumentsTypes, pos, method.getLastParam().getType());
}
}
if (method.getNumberOfParams() != argumentsTypes.size()) {
return false;
}
Map<String, ResolvedType> matchedParameters = new HashMap<>();
boolean needForWildCardTolerance = false;
for (int i = 0; i < method.getNumberOfParams(); i++) {
ResolvedType expectedType = method.getParam(i).getType();
ResolvedType actualType = argumentsTypes.get(i);
if ((expectedType.isTypeVariable() && !(expectedType.isWildcard())) && expectedType.asTypeParameter().declaredOnMethod()) {
matchedParameters.put(expectedType.asTypeParameter().getName(), actualType);
continue;
}
boolean isAssignableWithoutSubstitution = expectedType.isAssignableBy(actualType) ||
(method.getParam(i).isVariadic() && new ResolvedArrayType(expectedType).isAssignableBy(actualType));
if (!isAssignableWithoutSubstitution && expectedType.isReferenceType() && actualType.isReferenceType()) {
isAssignableWithoutSubstitution = isAssignableMatchTypeParameters(
expectedType.asReferenceType(),
actualType.asReferenceType(),
matchedParameters);
}
if (!isAssignableWithoutSubstitution) {
List<ResolvedTypeParameterDeclaration> typeParameters = method.getTypeParameters();
typeParameters.addAll(method.declaringType().getTypeParameters());
for (ResolvedTypeParameterDeclaration tp : typeParameters) {
expectedType = replaceTypeParam(expectedType, tp, typeSolver);
}
if (!expectedType.isAssignableBy(actualType)) {
if (actualType.isWildcard() && withWildcardTolerance && !expectedType.isPrimitive()) {
needForWildCardTolerance = true;
continue;
}
if (method.hasVariadicParameter() && i == method.getNumberOfParams() - 1) {
if (new ResolvedArrayType(expectedType).isAssignableBy(actualType)) {
continue;
}
}
return false;
}
}
}
return !withWildcardTolerance || needForWildCardTolerance;
}
public static boolean isAssignableMatchTypeParameters(ResolvedType expected, ResolvedType actual,
Map<String, ResolvedType> matchedParameters) {
if (expected.isReferenceType() && actual.isReferenceType()) {
return isAssignableMatchTypeParameters(expected.asReferenceType(), actual.asReferenceType(), matchedParameters);
} else if (expected.isTypeVariable()) {
matchedParameters.put(expected.asTypeParameter().getName(), actual);
return true;
} else {
throw new UnsupportedOperationException(expected.getClass().getCanonicalName() + " " + actual.getClass().getCanonicalName());
}
}
public static boolean isAssignableMatchTypeParameters(ResolvedReferenceType expected, ResolvedReferenceType actual,
Map<String, ResolvedType> matchedParameters) {
if (actual.getQualifiedName().equals(expected.getQualifiedName())) {
return isAssignableMatchTypeParametersMatchingQName(expected, actual, matchedParameters);
} else {
List<ResolvedReferenceType> ancestors = actual.getAllAncestors();
for (ResolvedReferenceType ancestor : ancestors) {
if (isAssignableMatchTypeParametersMatchingQName(expected, ancestor, matchedParameters)) {
return true;
}
}
}
return false;
}
private static boolean isAssignableMatchTypeParametersMatchingQName(ResolvedReferenceType expected, ResolvedReferenceType actual,
Map<String, ResolvedType> matchedParameters) {
if (!expected.getQualifiedName().equals(actual.getQualifiedName())) {
return false;
}
if (expected.typeParametersValues().size() != actual.typeParametersValues().size()) {
throw new UnsupportedOperationException();
//return true;
}
for (int i = 0; i < expected.typeParametersValues().size(); i++) {
ResolvedType expectedParam = expected.typeParametersValues().get(i);
ResolvedType actualParam = actual.typeParametersValues().get(i);
// In the case of nested parameterizations eg. List<R> <-> List<Integer>
// we should peel off one layer and ensure R <-> Integer
if (expectedParam.isReferenceType() && actualParam.isReferenceType()){
ResolvedReferenceType r1 = expectedParam.asReferenceType();
ResolvedReferenceType r2 = actualParam.asReferenceType();
return isAssignableMatchTypeParametersMatchingQName(r1, r2, matchedParameters);
}
if (expectedParam.isTypeVariable()) {
String expectedParamName = expectedParam.asTypeParameter().getName();
if (!actualParam.isTypeVariable() || !actualParam.asTypeParameter().getName().equals(expectedParamName)) {
return matchTypeVariable(expectedParam.asTypeVariable(), actualParam, matchedParameters);
}
} else if (expectedParam.isReferenceType()) {
if (actualParam.isTypeVariable()) {
return matchTypeVariable(actualParam.asTypeVariable(), expectedParam, matchedParameters);
} else if (!expectedParam.equals(actualParam)) {
return false;
}
} else if (expectedParam.isWildcard()) {
if (expectedParam.asWildcard().isExtends()) {
return isAssignableMatchTypeParameters(expectedParam.asWildcard().getBoundedType(), actual, matchedParameters);
}
// TODO verify super bound
return true;
} else {
throw new UnsupportedOperationException(expectedParam.describe());
}
}
return true;
}
private static boolean matchTypeVariable(ResolvedTypeVariable typeVariable, ResolvedType type, Map<String, ResolvedType> matchedParameters) {
String typeParameterName = typeVariable.asTypeParameter().getName();
if (matchedParameters.containsKey(typeParameterName)) {
ResolvedType matchedParameter = matchedParameters.get(typeParameterName);
if (matchedParameter.isAssignableBy(type)) {
return true;
} else if (type.isAssignableBy(matchedParameter)) {
// update matchedParameters to contain the more general type
matchedParameters.put(typeParameterName, type);
return true;
}
return false;
} else {
matchedParameters.put(typeParameterName, type);
}
return true;
}
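    // Substitutes the type parameter tp, wherever it occurs in the given type
    // (directly, as an array component, or nested in a reference type), with
    // its single bound, or with java.lang.Object when tp is unbounded.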
public static ResolvedType replaceTypeParam(ResolvedType type, ResolvedTypeParameterDeclaration tp, TypeSolver typeSolver) {
if (type.isTypeVariable()) {
if (type.describe().equals(tp.getName())) {
List<ResolvedTypeParameterDeclaration.Bound> bounds = tp.getBounds();
if (bounds.size() > 1) {
throw new UnsupportedOperationException();
} else if (bounds.size() == 1) {
return bounds.get(0).getType();
} else {
return new ReferenceTypeImpl(typeSolver.solveType(Object.class.getCanonicalName()), typeSolver);
}
}
return type;
} else if (type.isPrimitive()) {
return type;
} else if (type.isArray()) {
return new ResolvedArrayType(replaceTypeParam(type.asArrayType().getComponentType(), tp, typeSolver));
} else if (type.isReferenceType()) {
ResolvedReferenceType result = type.asReferenceType();
result = result.transformTypeParameters(typeParam -> replaceTypeParam(typeParam, tp, typeSolver)).asReferenceType();
return result;
} else if (type.isWildcard()) {
if (type.describe().equals(tp.getName())) {
List<ResolvedTypeParameterDeclaration.Bound> bounds = tp.getBounds();
if (bounds.size() > 1) {
throw new UnsupportedOperationException();
} else if (bounds.size() == 1) {
return bounds.get(0).getType();
} else {
return new ReferenceTypeImpl(typeSolver.solveType(Object.class.getCanonicalName()), typeSolver);
}
}
return type;
} else {
throw new UnsupportedOperationException("Replacing " + type + ", param " + tp + " with " + type.getClass().getCanonicalName());
}
}
public static boolean isApplicable(MethodUsage method, String name, List<ResolvedType> argumentsTypes, TypeSolver typeSolver) {
if (!method.getName().equals(name)) {
return false;
}
// TODO Consider varargs
if (method.getNoParams() != argumentsTypes.size()) {
return false;
}
for (int i = 0; i < method.getNoParams(); i++) {
ResolvedType expectedType = method.getParamType(i);
ResolvedType expectedTypeWithoutSubstitutions = expectedType;
ResolvedType expectedTypeWithInference = method.getParamType(i);
ResolvedType actualType = argumentsTypes.get(i);
List<ResolvedTypeParameterDeclaration> typeParameters = method.getDeclaration().getTypeParameters();
typeParameters.addAll(method.declaringType().getTypeParameters());
if (expectedType.describe().equals(actualType.describe())){
return true;
}
Map<ResolvedTypeParameterDeclaration, ResolvedType> derivedValues = new HashMap<>();
for (int j = 0; j < method.getParamTypes().size(); j++) {
            ResolvedParameterDeclaration parameter = method.getDeclaration().getParam(j); // pair the j-th parameter with the j-th argument below
ResolvedType parameterType = parameter.getType();
if (parameter.isVariadic()) {
parameterType = parameterType.asArrayType().getComponentType();
}
inferTypes(argumentsTypes.get(j), parameterType, derivedValues);
}
for (Map.Entry<ResolvedTypeParameterDeclaration, ResolvedType> entry : derivedValues.entrySet()){
ResolvedTypeParameterDeclaration tp = entry.getKey();
expectedTypeWithInference = expectedTypeWithInference.replaceTypeVariables(tp, entry.getValue());
}
for (ResolvedTypeParameterDeclaration tp : typeParameters) {
if (tp.getBounds().isEmpty()) {
//expectedType = expectedType.replaceTypeVariables(tp.getName(), new ReferenceTypeUsageImpl(typeSolver.solveType(Object.class.getCanonicalName()), typeSolver));
expectedType = expectedType.replaceTypeVariables(tp, ResolvedWildcard.extendsBound(new ReferenceTypeImpl(typeSolver.solveType(Object.class.getCanonicalName()), typeSolver)));
} else if (tp.getBounds().size() == 1) {
ResolvedTypeParameterDeclaration.Bound bound = tp.getBounds().get(0);
if (bound.isExtends()) {
//expectedType = expectedType.replaceTypeVariables(tp.getName(), bound.getType());
expectedType = expectedType.replaceTypeVariables(tp, ResolvedWildcard.extendsBound(bound.getType()));
} else {
//expectedType = expectedType.replaceTypeVariables(tp.getName(), new ReferenceTypeUsageImpl(typeSolver.solveType(Object.class.getCanonicalName()), typeSolver));
expectedType = expectedType.replaceTypeVariables(tp, ResolvedWildcard.superBound(bound.getType()));
}
} else {
throw new UnsupportedOperationException();
}
}
ResolvedType expectedType2 = expectedTypeWithoutSubstitutions;
for (ResolvedTypeParameterDeclaration tp : typeParameters) {
if (tp.getBounds().isEmpty()) {
expectedType2 = expectedType2.replaceTypeVariables(tp, new ReferenceTypeImpl(typeSolver.solveType(Object.class.getCanonicalName()), typeSolver));
} else if (tp.getBounds().size() == 1) {
ResolvedTypeParameterDeclaration.Bound bound = tp.getBounds().get(0);
if (bound.isExtends()) {
expectedType2 = expectedType2.replaceTypeVariables(tp, bound.getType());
} else {
expectedType2 = expectedType2.replaceTypeVariables(tp, new ReferenceTypeImpl(typeSolver.solveType(Object.class.getCanonicalName()), typeSolver));
}
} else {
throw new UnsupportedOperationException();
}
}
if (!expectedType.isAssignableBy(actualType)
&& !expectedType2.isAssignableBy(actualType)
&& !expectedTypeWithInference.isAssignableBy(actualType)
&& !expectedTypeWithoutSubstitutions.isAssignableBy(actualType)) {
return false;
}
}
return true;
}
private static List<ResolvedMethodDeclaration> getMethodsWithoutDuplicates(List<ResolvedMethodDeclaration> methods) {
Set<ResolvedMethodDeclaration> s = new TreeSet<>((m1, m2) -> {
if (m1 instanceof JavaParserMethodDeclaration && m2 instanceof JavaParserMethodDeclaration &&
((JavaParserMethodDeclaration) m1).getWrappedNode().equals(((JavaParserMethodDeclaration) m2).getWrappedNode())) {
return 0;
}
return 1;
});
s.addAll(methods);
List<ResolvedMethodDeclaration> res = new ArrayList<>();
Set<String> usedSignatures = new HashSet<>();
for (ResolvedMethodDeclaration md : methods) {
String signature = md.getQualifiedSignature();
if (!usedSignatures.contains(signature)) {
usedSignatures.add(signature);
res.add(md);
}
}
return res;
}
    /**
     * @param methods we expect the methods to be ordered such that inherited methods are later in the list
     * @param name the name of the method being resolved
     * @param argumentsTypes the types of the actual arguments
     * @param typeSolver the type solver used to resolve referenced types
     * @return a reference to the most applicable method, or an unsolved reference if none applies
     */
public static SymbolReference<ResolvedMethodDeclaration> findMostApplicable(List<ResolvedMethodDeclaration> methods,
String name, List<ResolvedType> argumentsTypes, TypeSolver typeSolver) {
SymbolReference<ResolvedMethodDeclaration> res = findMostApplicable(methods, name, argumentsTypes, typeSolver, false);
if (res.isSolved()) {
return res;
}
return findMostApplicable(methods, name, argumentsTypes, typeSolver, true);
}
public static SymbolReference<ResolvedMethodDeclaration> findMostApplicable(List<ResolvedMethodDeclaration> methods,
String name, List<ResolvedType> argumentsTypes, TypeSolver typeSolver, boolean wildcardTolerance) {
List<ResolvedMethodDeclaration> methodsWithMatchingName = methods.stream().filter(m -> m.getName().equals(name)).collect(Collectors.toList());
List<ResolvedMethodDeclaration> applicableMethods = getMethodsWithoutDuplicates(methodsWithMatchingName).stream().filter((m) -> isApplicable(m, name, argumentsTypes, typeSolver, wildcardTolerance)).collect(Collectors.toList());
if (applicableMethods.isEmpty()) {
return SymbolReference.unsolved(ResolvedMethodDeclaration.class);
}
if (applicableMethods.size() > 1) {
List<Integer> nullParamIndexes = new ArrayList<>();
for (int i = 0; i < argumentsTypes.size(); i++) {
if (argumentsTypes.get(i).isNull()) {
nullParamIndexes.add(i);
}
}
if (!nullParamIndexes.isEmpty()) {
// remove method with array param if a non array exists and arg is null
Set<ResolvedMethodDeclaration> removeCandidates = new HashSet<>();
for (Integer nullParamIndex: nullParamIndexes) {
for (ResolvedMethodDeclaration methDecl: applicableMethods) {
if (methDecl.getParam(nullParamIndex.intValue()).getType().isArray()) {
removeCandidates.add(methDecl);
}
}
}
if (!removeCandidates.isEmpty() && removeCandidates.size() < applicableMethods.size()) {
applicableMethods.removeAll(removeCandidates);
}
}
}
if (applicableMethods.size() == 1) {
return SymbolReference.solved(applicableMethods.get(0));
} else {
ResolvedMethodDeclaration winningCandidate = applicableMethods.get(0);
ResolvedMethodDeclaration other = null;
boolean possibleAmbiguity = false;
for (int i = 1; i < applicableMethods.size(); i++) {
other = applicableMethods.get(i);
if (isMoreSpecific(winningCandidate, other, argumentsTypes, typeSolver)) {
possibleAmbiguity = false;
} else if (isMoreSpecific(other, winningCandidate, argumentsTypes, typeSolver)) {
possibleAmbiguity = false;
winningCandidate = other;
} else {
if (winningCandidate.declaringType().getQualifiedName().equals(other.declaringType().getQualifiedName())) {
possibleAmbiguity = true;
} else {
// we expect the methods to be ordered such that inherited methods are later in the list
}
}
}
if (possibleAmbiguity) {
// pick the first exact match if it exists
if (!isExactMatch(winningCandidate, argumentsTypes)) {
if (isExactMatch(other, argumentsTypes)) {
winningCandidate = other;
} else {
throw new MethodAmbiguityException("Ambiguous method call: cannot find a most applicable method: " + winningCandidate + ", " + other);
}
}
}
return SymbolReference.solved(winningCandidate);
}
}
protected static boolean isExactMatch(ResolvedMethodLikeDeclaration method, List<ResolvedType> argumentsTypes) {
for (int i = 0; i < method.getNumberOfParams(); i++) {
if (!method.getParam(i).getType().equals(argumentsTypes.get(i))) {
return false;
}
}
return true;
}
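// Note on the specificity checks below: X.isAssignableBy(Y) holds when a value
// of type Y can be assigned to X, i.e. when Y is a subtype of X. Worked example
// (hypothetical overloads, for illustration only): for foo(Object) vs foo(String)
// called with a String argument, isMoreSpecific(foo(String), foo(Object)) returns
// true, because Object.isAssignableBy(String) holds while String.isAssignableBy(Object)
// does not, so the foo(Object) overload loses.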
private static boolean isMoreSpecific(ResolvedMethodDeclaration methodA, ResolvedMethodDeclaration methodB,
List<ResolvedType> argumentTypes, TypeSolver typeSolver) {
boolean oneMoreSpecificFound = false;
if (methodA.getNumberOfParams() < methodB.getNumberOfParams()) {
return true;
}
if (methodA.getNumberOfParams() > methodB.getNumberOfParams()) {
return false;
}
for (int i = 0; i < methodA.getNumberOfParams(); i++) {
ResolvedType tdA = methodA.getParam(i).getType();
ResolvedType tdB = methodB.getParam(i).getType();
// methodA's parameter is more specific here (tdA is a subtype of tdB)
if (tdB.isAssignableBy(tdA) && !tdA.isAssignableBy(tdB)) {
oneMoreSpecificFound = true;
}
// methodB's parameter is more specific here (tdB is a subtype of tdA)
if (tdA.isAssignableBy(tdB) && !tdB.isAssignableBy(tdA)) {
return false;
}
}
if (!oneMoreSpecificFound) {
int lastIndex = argumentTypes.size() - 1;
if (methodA.hasVariadicParameter() && !methodB.hasVariadicParameter()) {
// if the last argument is an array then methodA (the variadic method) is more specific
if (argumentTypes.get(lastIndex).isArray()) {
return true;
}
if (!argumentTypes.get(lastIndex).isArray()) {
return false;
}
}
if (!methodA.hasVariadicParameter() && methodB.hasVariadicParameter()) {
// if the last argument is an array and methodA is not variadic then
// methodA is not more specific
if (argumentTypes.get(lastIndex).isArray()) {
return false;
}
if (!argumentTypes.get(lastIndex).isArray()) {
return true;
}
}
}
return oneMoreSpecificFound;
}
private static boolean isMoreSpecific(MethodUsage methodA, MethodUsage methodB, TypeSolver typeSolver) {
boolean oneMoreSpecificFound = false;
for (int i = 0; i < methodA.getNoParams(); i++) {
ResolvedType tdA = methodA.getParamType(i);
ResolvedType tdB = methodB.getParamType(i);
boolean aIsAssignableByB = tdA.isAssignableBy(tdB);
boolean bIsAssignableByA = tdB.isAssignableBy(tdA);
// methodA's parameter is more specific here (tdA is a subtype of tdB)
if (bIsAssignableByA && !aIsAssignableByB) {
oneMoreSpecificFound = true;
}
// methodB's parameter is more specific here (tdB is a subtype of tdA)
if (aIsAssignableByB && !bIsAssignableByA) {
return false;
}
}
return oneMoreSpecificFound;
}
public static Optional<MethodUsage> findMostApplicableUsage(List<MethodUsage> methods, String name, List<ResolvedType> argumentsTypes, TypeSolver typeSolver) {
List<MethodUsage> applicableMethods = methods.stream().filter((m) -> isApplicable(m, name, argumentsTypes, typeSolver)).collect(Collectors.toList());
if (applicableMethods.isEmpty()) {
return Optional.empty();
}
if (applicableMethods.size() == 1) {
return Optional.of(applicableMethods.get(0));
} else {
MethodUsage winningCandidate = applicableMethods.get(0);
for (int i = 1; i < applicableMethods.size(); i++) {
MethodUsage other = applicableMethods.get(i);
if (isMoreSpecific(winningCandidate, other, typeSolver)) {
// nothing to do
} else if (isMoreSpecific(other, winningCandidate, typeSolver)) {
winningCandidate = other;
} else {
if (winningCandidate.declaringType().getQualifiedName().equals(other.declaringType().getQualifiedName())) {
if (!areOverride(winningCandidate, other)) {
throw new MethodAmbiguityException("Ambiguous method call: cannot find a most applicable method: " + winningCandidate + ", " + other + ". First declared in " + winningCandidate.declaringType().getQualifiedName());
}
} else {
// we expect the methods to be ordered such that inherited methods are later in the list
//throw new UnsupportedOperationException();
}
}
}
return Optional.of(winningCandidate);
}
}
private static boolean areOverride(MethodUsage winningCandidate, MethodUsage other) {
if (!winningCandidate.getName().equals(other.getName())) {
return false;
}
if (winningCandidate.getNoParams() != other.getNoParams()) {
return false;
}
for (int i = 0; i < winningCandidate.getNoParams(); i++) {
if (!winningCandidate.getParamTypes().get(i).equals(other.getParamTypes().get(i))) {
return false;
}
}
return true;
}
public static SymbolReference<ResolvedMethodDeclaration> solveMethodInType(ResolvedTypeDeclaration typeDeclaration,
String name, List<ResolvedType> argumentsTypes, TypeSolver typeSolver) {
return solveMethodInType(typeDeclaration, name, argumentsTypes, false, typeSolver);
}
/**
* Replaces TypeDeclaration.solveMethod
*
* @param typeDeclaration the type declaration in which to look for the method
* @param name the name of the method to solve
* @param argumentsTypes the static types of the actual arguments
* @param staticOnly whether only static methods should be considered
* @return a solved reference to the matching method, or an unsolved reference if none is found
*/
public static SymbolReference<ResolvedMethodDeclaration> solveMethodInType(ResolvedTypeDeclaration typeDeclaration,
String name, List<ResolvedType> argumentsTypes, boolean staticOnly,
TypeSolver typeSolver) {
if (typeDeclaration instanceof JavaParserClassDeclaration) {
Context ctx = ((JavaParserClassDeclaration) typeDeclaration).getContext();
return ctx.solveMethod(name, argumentsTypes, staticOnly, typeSolver);
}
if (typeDeclaration instanceof JavaParserInterfaceDeclaration) {
Context ctx = ((JavaParserInterfaceDeclaration) typeDeclaration).getContext();
return ctx.solveMethod(name, argumentsTypes, staticOnly, typeSolver);
}
if (typeDeclaration instanceof JavaParserEnumDeclaration) {
if (name.equals("values") && argumentsTypes.isEmpty()) {
return SymbolReference.solved(new JavaParserEnumDeclaration.ValuesMethod((JavaParserEnumDeclaration) typeDeclaration, typeSolver));
}
Context ctx = ((JavaParserEnumDeclaration) typeDeclaration).getContext();
return ctx.solveMethod(name, argumentsTypes, staticOnly, typeSolver);
}
if (typeDeclaration instanceof JavaParserAnonymousClassDeclaration) {
Context ctx = ((JavaParserAnonymousClassDeclaration) typeDeclaration).getContext();
return ctx.solveMethod(name, argumentsTypes, staticOnly, typeSolver);
}
if (typeDeclaration instanceof ReflectionClassDeclaration) {
return ((ReflectionClassDeclaration) typeDeclaration).solveMethod(name, argumentsTypes, staticOnly);
}
if (typeDeclaration instanceof ReflectionInterfaceDeclaration) {
return ((ReflectionInterfaceDeclaration) typeDeclaration).solveMethod(name, argumentsTypes, staticOnly);
}
if (typeDeclaration instanceof ReflectionEnumDeclaration) {
return ((ReflectionEnumDeclaration) typeDeclaration).solveMethod(name, argumentsTypes, staticOnly);
}
if (typeDeclaration instanceof JavassistInterfaceDeclaration) {
return ((JavassistInterfaceDeclaration) typeDeclaration).solveMethod(name, argumentsTypes, staticOnly);
}
if (typeDeclaration instanceof JavassistClassDeclaration) {
return ((JavassistClassDeclaration) typeDeclaration).solveMethod(name, argumentsTypes, staticOnly);
}
if (typeDeclaration instanceof JavassistEnumDeclaration) {
return ((JavassistEnumDeclaration) typeDeclaration).solveMethod(name, argumentsTypes, staticOnly);
}
throw new UnsupportedOperationException(typeDeclaration.getClass().getCanonicalName());
}
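// inferTypes walks a source/target type pair in parallel and records, for each
// type variable found in the target, the concrete source type it corresponds to.
// Worked example (for illustration): inferring source List<String> against target
// List<T> recurses into the pairwise type arguments and maps T -> String.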
private static void inferTypes(ResolvedType source, ResolvedType target, Map<ResolvedTypeParameterDeclaration, ResolvedType> mappings) {
if (source.equals(target)) {
return;
}
if (source.isReferenceType() && target.isReferenceType()) {
ResolvedReferenceType sourceRefType = source.asReferenceType();
ResolvedReferenceType targetRefType = target.asReferenceType();
if (sourceRefType.getQualifiedName().equals(targetRefType.getQualifiedName())) {
if (!sourceRefType.isRawType() && !targetRefType.isRawType()) {
for (int i = 0; i < sourceRefType.typeParametersValues().size(); i++) {
inferTypes(sourceRefType.typeParametersValues().get(i), targetRefType.typeParametersValues().get(i), mappings);
}
}
}
return;
}
if (source.isReferenceType() && target.isWildcard()) {
if (target.asWildcard().isBounded()) {
inferTypes(source, target.asWildcard().getBoundedType(), mappings);
return;
}
return;
}
if (source.isWildcard() && target.isWildcard()) {
return;
}
if (source.isReferenceType() && target.isTypeVariable()) {
mappings.put(target.asTypeParameter(), source);
return;
}
if (source.isWildcard() && target.isReferenceType()){
if (source.asWildcard().isBounded()){
inferTypes(source.asWildcard().getBoundedType(), target, mappings);
}
return;
}
if (source.isWildcard() && target.isTypeVariable()) {
mappings.put(target.asTypeParameter(), source);
return;
}
if (source.isTypeVariable() && target.isTypeVariable()) {
mappings.put(target.asTypeParameter(), source);
return;
}
if (source.isPrimitive() || target.isPrimitive()) {
return;
}
if (source.isNull()) {
return;
}
}
}
| 1 | 12,685 | I would throw an exception if the type is not solved | javaparser-javaparser | java |
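The review note attached to this row suggests failing fast when a method cannot be resolved. A minimal sketch of that idea, assuming it applies to a call site that receives a SymbolReference; the helper requireSolved and its message are hypothetical, not part of the JavaParser API:

private static ResolvedMethodDeclaration requireSolved(SymbolReference<ResolvedMethodDeclaration> ref, String name) {
    // Hypothetical helper: throw instead of silently propagating an unsolved reference.
    if (!ref.isSolved()) {
        throw new UnsupportedOperationException("Unable to solve method: " + name);
    }
    return ref.getCorrespondingDeclaration();
}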
@@ -52,11 +52,8 @@ func TestDifficulty(t *testing.T) {
// files are 2 years old, contain strange values
dt.skipLoad("difficultyCustomHomestead\\.json")
- dt.skipLoad("difficultyMorden\\.json")
- dt.skipLoad("difficultyOlimpic\\.json")
dt.config("Ropsten", *params.RopstenChainConfig)
- dt.config("Morden", *params.RopstenChainConfig)
dt.config("Frontier", params.ChainConfig{})
dt.config("Homestead", params.ChainConfig{ | 1 | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tests
import (
"math/big"
"testing"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/params"
)
var (
mainnetChainConfig = params.ChainConfig{
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(1150000),
DAOForkBlock: big.NewInt(1920000),
DAOForkSupport: true,
EIP150Block: big.NewInt(2463000),
EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"),
EIP155Block: big.NewInt(2675000),
EIP158Block: big.NewInt(2675000),
ByzantiumBlock: big.NewInt(4370000),
}
)
func TestDifficulty(t *testing.T) {
t.Parallel()
dt := new(testMatcher)
// Not difficulty-tests
dt.skipLoad("hexencodetest.*")
dt.skipLoad("crypto.*")
dt.skipLoad("blockgenesistest\\.json")
dt.skipLoad("genesishashestest\\.json")
dt.skipLoad("keyaddrtest\\.json")
dt.skipLoad("txtest\\.json")
// files are 2 years old, contain strange values
dt.skipLoad("difficultyCustomHomestead\\.json")
dt.skipLoad("difficultyMorden\\.json")
dt.skipLoad("difficultyOlimpic\\.json")
dt.config("Ropsten", *params.RopstenChainConfig)
dt.config("Morden", *params.RopstenChainConfig)
dt.config("Frontier", params.ChainConfig{})
dt.config("Homestead", params.ChainConfig{
HomesteadBlock: big.NewInt(0),
})
dt.config("Byzantium", params.ChainConfig{
ByzantiumBlock: big.NewInt(0),
})
dt.config("Frontier", *params.RopstenChainConfig)
dt.config("MainNetwork", mainnetChainConfig)
dt.config("CustomMainNetwork", mainnetChainConfig)
dt.config("Constantinople", params.ChainConfig{
ConstantinopleBlock: big.NewInt(0),
})
dt.config("EIP2384", params.ChainConfig{
MuirGlacierBlock: big.NewInt(0),
})
dt.config("difficulty.json", mainnetChainConfig)
dt.walk(t, difficultyTestDir, func(t *testing.T, name string, test *DifficultyTest) {
cfg := dt.findConfig(name)
if test.ParentDifficulty.Cmp(params.MinimumDifficulty) < 0 {
t.Skip("difficulty below minimum")
return
}
if err := dt.checkFailure(t, test.Run(cfg)); err != nil {
t.Error(err)
}
})
}
| 1 | 22,558 | If we remove `dt.skipLoad`, these tests will run. You probably mean the opposite? | ledgerwatch-erigon | go |
@@ -0,0 +1,14 @@
+module Beaker
+ module Shared
+ module Timed
+
+ def run_and_report_duration(&block)
+ start = Time.now
+ block.call
+ Time.now - start
+ end
+
+ end
+ end
+end
+ | 1 | 1 | 4,686 | I feel like we do this in a lot of places should we move that out into its own PR? | voxpupuli-beaker | rb |
|
@@ -12,7 +12,7 @@ import 'css!./style';
fillImageElement(elem, source);
}
- async function itemBlurhashing(target, blurhashstr) {
+ function itemBlurhashing(target, blurhashstr) {
if (blurhash.isBlurhashValid(blurhashstr)) {
// Although the default value recommended by the Blurhash developers is 32x32, a size of 18x18 seems to be the sweet spot for us,
// improving the performance and reducing the memory usage, while retaining almost full blur quality. | 1 | import * as lazyLoader from 'lazyLoader';
import * as userSettings from 'userSettings';
import * as blurhash from 'blurhash';
import 'css!./style';
/* eslint-disable indent */
export function lazyImage(elem, source = elem.getAttribute('data-src')) {
if (!source) {
return;
}
fillImageElement(elem, source);
}
async function itemBlurhashing(target, blurhashstr) {
if (blurhash.isBlurhashValid(blurhashstr)) {
// Although the default value recommended by the Blurhash developers is 32x32, a size of 18x18 seems to be the sweet spot for us,
// improving the performance and reducing the memory usage, while retaining almost full blur quality.
// Lower values had more visible pixelation
let width = 18;
let height = 18;
let pixels;
try {
pixels = blurhash.decode(blurhashstr, width, height);
} catch (err) {
console.error('Blurhash decode error: ', err);
target.classList.add('non-blurhashable');
return;
}
let canvas = document.createElement('canvas');
canvas.width = width;
canvas.height = height;
let ctx = canvas.getContext('2d');
let imgData = ctx.createImageData(width, height);
imgData.data.set(pixels);
ctx.putImageData(imgData, 0, 0);
let child = target.appendChild(canvas);
child.classList.add('blurhash-canvas');
child.style.opacity = 1;
if (userSettings.enableFastFadein()) {
child.classList.add('lazy-blurhash-fadein-fast');
} else {
child.classList.add('lazy-blurhash-fadein');
}
target.classList.add('blurhashed');
target.removeAttribute('data-blurhash');
}
}
function switchCanvas(elem) {
let child = elem.getElementsByClassName('blurhash-canvas')[0];
if (child) {
child.style.opacity = elem.getAttribute('data-src') ? 1 : 0;
}
}
export function fillImage(entry) {
if (!entry) {
throw new Error('entry cannot be null');
}
let target = entry.target;
var source = undefined;
if (target) {
source = target.getAttribute('data-src');
var blurhashstr = target.getAttribute('data-blurhash');
} else {
source = entry;
}
if (userSettings.enableBlurhash()) {
if (!target.classList.contains('blurhashed') && !target.classList.contains('non-blurhashable') && blurhashstr) {
itemBlurhashing(target, blurhashstr);
} else if (!blurhashstr && !target.classList.contains('blurhashed')) {
target.classList.add('non-blurhashable');
}
}
if (entry.intersectionRatio > 0) {
if (source) fillImageElement(target, source);
} else if (!source) {
emptyImageElement(target);
}
}
function fillImageElement(elem, url) {
if (url === undefined) {
throw new TypeError('url cannot be undefined');
}
let preloaderImg = new Image();
preloaderImg.src = url;
// This is necessary here, so changing blurhash settings without reloading the page works
if (!userSettings.enableBlurhash() || elem.classList.contains('non-blurhashable')) {
elem.classList.add('lazy-hidden');
}
preloaderImg.addEventListener('load', () => {
if (elem.tagName !== 'IMG') {
elem.style.backgroundImage = "url('" + url + "')";
} else {
elem.setAttribute('src', url);
}
elem.removeAttribute('data-src');
if (elem.classList.contains('non-blurhashable') || !userSettings.enableBlurhash()) {
elem.classList.remove('lazy-hidden');
if (userSettings.enableFastFadein()) {
elem.classList.add('lazy-image-fadein-fast');
} else {
elem.classList.add('lazy-image-fadein');
}
} else {
switchCanvas(elem);
}
});
}
function emptyImageElement(elem) {
var url;
if (elem.tagName !== 'IMG') {
url = elem.style.backgroundImage.slice(4, -1).replace(/"/g, '');
elem.style.backgroundImage = 'none';
} else {
url = elem.getAttribute('src');
elem.setAttribute('src', '');
}
elem.setAttribute('data-src', url);
if (elem.classList.contains('non-blurhashable') || !userSettings.enableBlurhash()) {
elem.classList.remove('lazy-image-fadein-fast', 'lazy-image-fadein');
elem.classList.add('lazy-hidden');
} else {
switchCanvas(elem);
}
}
export function lazyChildren(elem) {
lazyLoader.lazyChildren(elem, fillImage);
}
export function getPrimaryImageAspectRatio(items) {
var values = [];
for (var i = 0, length = items.length; i < length; i++) {
var ratio = items[i].PrimaryImageAspectRatio || 0;
if (!ratio) {
continue;
}
values[values.length] = ratio;
}
if (!values.length) {
return null;
}
// Use the median
values.sort(function (a, b) {
return a - b;
});
var half = Math.floor(values.length / 2);
var result;
if (values.length % 2) {
result = values[half];
} else {
result = (values[half - 1] + values[half]) / 2.0;
}
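// Snap near-standard ratios to exact values so small metadata variations do not
// produce slightly-off layouts; e.g. a computed median of 0.68 is within 0.15 of
// 2:3 (~0.667) and is snapped to exactly 2/3 below.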
// If really close to 2:3 (poster image), just return 2:3
var aspect2x3 = 2 / 3;
if (Math.abs(aspect2x3 - result) <= 0.15) {
return aspect2x3;
}
// If really close to 16:9 (episode image), just return 16:9
var aspect16x9 = 16 / 9;
if (Math.abs(aspect16x9 - result) <= 0.2) {
return aspect16x9;
}
// If really close to 1 (square image), just return 1
if (Math.abs(1 - result) <= 0.15) {
return 1;
}
// If really close to 4:3, just return 4:3
var aspect4x3 = 4 / 3;
if (Math.abs(aspect4x3 - result) <= 0.15) {
return aspect4x3;
}
return result;
}
export function fillImages(elems) {
for (var i = 0, length = elems.length; i < length; i++) {
var elem = elems[i]; // fix: iterate every element, not just the first
fillImage(elem);
}
}
/* eslint-enable indent */
export default {
fillImages: fillImages,
fillImage: fillImage,
lazyImage: lazyImage,
lazyChildren: lazyChildren,
getPrimaryImageAspectRatio: getPrimaryImageAspectRatio
};
| 1 | 16,369 | Already asked you in Matrix, but I'll do it here again so it can be discussed publicly. Why? | jellyfin-jellyfin-web | js |