| patch (string, 17–31.2k chars) | y (int64, 1–1) | oldf (string, 0–2.21M chars) | idx (int64, 1–1) | id (int64, 4.29k–68.4k) | msg (string, 8–843 chars) | proj (212 classes) | lang (9 classes) |
---|---|---|---|---|---|---|---|
@@ -23,6 +23,8 @@ type KeyExchangeFunction func() crypto.KeyExchange
// The CryptoSetupServer handles all things crypto for the Session
type cryptoSetupServer struct {
+ mutex sync.RWMutex
+
connID protocol.ConnectionID
remoteAddr net.Addr
scfg *ServerConfig | 1 | package handshake
import (
"bytes"
"crypto/rand"
"encoding/binary"
"errors"
"io"
"net"
"sync"
"github.com/lucas-clemente/quic-go/internal/crypto"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/lucas-clemente/quic-go/qerr"
)
// QuicCryptoKeyDerivationFunction is used for key derivation
type QuicCryptoKeyDerivationFunction func(forwardSecure bool, sharedSecret, nonces []byte, connID protocol.ConnectionID, chlo []byte, scfg []byte, cert []byte, divNonce []byte, pers protocol.Perspective) (crypto.AEAD, error)
// KeyExchangeFunction is used to make a new KEX
type KeyExchangeFunction func() crypto.KeyExchange
// The CryptoSetupServer handles all things crypto for the Session
type cryptoSetupServer struct {
connID protocol.ConnectionID
remoteAddr net.Addr
scfg *ServerConfig
diversificationNonce []byte
version protocol.VersionNumber
supportedVersions []protocol.VersionNumber
acceptSTKCallback func(net.Addr, *Cookie) bool
nullAEAD crypto.AEAD
secureAEAD crypto.AEAD
forwardSecureAEAD crypto.AEAD
receivedForwardSecurePacket bool
receivedSecurePacket bool
sentSHLO chan struct{} // this channel is closed as soon as the SHLO has been written
receivedParams bool
paramsChan chan<- TransportParameters
handshakeEvent chan<- struct{}
keyDerivation QuicCryptoKeyDerivationFunction
keyExchange KeyExchangeFunction
cryptoStream io.ReadWriter
params *TransportParameters
mutex sync.RWMutex
}
var _ CryptoSetup = &cryptoSetupServer{}
// ErrHOLExperiment is returned when the client sends the FHL2 tag in the CHLO.
// This is an experiment implemented by Chrome in QUIC 36, which we don't support.
// TODO: remove this when dropping support for QUIC 36
var ErrHOLExperiment = qerr.Error(qerr.InvalidCryptoMessageParameter, "HOL experiment. Unsupported")
// ErrNSTPExperiment is returned when the client sends the NSTP tag in the CHLO.
// This is an experiment implemented by Chrome in QUIC 38, which we don't support at this point.
var ErrNSTPExperiment = qerr.Error(qerr.InvalidCryptoMessageParameter, "NSTP experiment. Unsupported")
// NewCryptoSetup creates a new CryptoSetup instance for a server
func NewCryptoSetup(
cryptoStream io.ReadWriter,
connID protocol.ConnectionID,
remoteAddr net.Addr,
version protocol.VersionNumber,
scfg *ServerConfig,
params *TransportParameters,
supportedVersions []protocol.VersionNumber,
acceptSTK func(net.Addr, *Cookie) bool,
paramsChan chan<- TransportParameters,
handshakeEvent chan<- struct{},
) (CryptoSetup, error) {
nullAEAD, err := crypto.NewNullAEAD(protocol.PerspectiveServer, connID, version)
if err != nil {
return nil, err
}
return &cryptoSetupServer{
cryptoStream: cryptoStream,
connID: connID,
remoteAddr: remoteAddr,
version: version,
supportedVersions: supportedVersions,
scfg: scfg,
keyDerivation: crypto.DeriveQuicCryptoAESKeys,
keyExchange: getEphermalKEX,
nullAEAD: nullAEAD,
params: params,
acceptSTKCallback: acceptSTK,
sentSHLO: make(chan struct{}),
paramsChan: paramsChan,
handshakeEvent: handshakeEvent,
}, nil
}
// HandleCryptoStream reads and writes messages on the crypto stream
func (h *cryptoSetupServer) HandleCryptoStream() error {
for {
var chloData bytes.Buffer
message, err := ParseHandshakeMessage(io.TeeReader(h.cryptoStream, &chloData))
if err != nil {
return qerr.HandshakeFailed
}
if message.Tag != TagCHLO {
return qerr.InvalidCryptoMessageType
}
utils.Debugf("Got %s", message)
done, err := h.handleMessage(chloData.Bytes(), message.Data)
if err != nil {
return err
}
if done {
return nil
}
}
}
func (h *cryptoSetupServer) handleMessage(chloData []byte, cryptoData map[Tag][]byte) (bool, error) {
if _, isHOLExperiment := cryptoData[TagFHL2]; isHOLExperiment {
return false, ErrHOLExperiment
}
if _, isNSTPExperiment := cryptoData[TagNSTP]; isNSTPExperiment {
return false, ErrNSTPExperiment
}
sniSlice, ok := cryptoData[TagSNI]
if !ok {
return false, qerr.Error(qerr.CryptoMessageParameterNotFound, "SNI required")
}
sni := string(sniSlice)
if sni == "" {
return false, qerr.Error(qerr.CryptoMessageParameterNotFound, "SNI required")
}
// prevent version downgrade attacks
// see https://groups.google.com/a/chromium.org/forum/#!topic/proto-quic/N-de9j63tCk for a discussion and examples
verSlice, ok := cryptoData[TagVER]
if !ok {
return false, qerr.Error(qerr.InvalidCryptoMessageParameter, "client hello missing version tag")
}
if len(verSlice) != 4 {
return false, qerr.Error(qerr.InvalidCryptoMessageParameter, "incorrect version tag")
}
ver := protocol.VersionNumber(binary.BigEndian.Uint32(verSlice))
// If the client's preferred version is not the version we are currently speaking, then the client went through a version negotiation. In this case, we need to make sure that we actually do not support this version and that it wasn't a downgrade attack.
if ver != h.version && protocol.IsSupportedVersion(h.supportedVersions, ver) {
return false, qerr.Error(qerr.VersionNegotiationMismatch, "Downgrade attack detected")
}
var reply []byte
var err error
certUncompressed, err := h.scfg.certChain.GetLeafCert(sni)
if err != nil {
return false, err
}
params, err := readHelloMap(cryptoData)
if err != nil {
return false, err
}
// blocks until the session has received the parameters
if !h.receivedParams {
h.receivedParams = true
h.paramsChan <- *params
}
if !h.isInchoateCHLO(cryptoData, certUncompressed) {
// We have a CHLO with a proper server config ID, do a 0-RTT handshake
reply, err = h.handleCHLO(sni, chloData, cryptoData)
if err != nil {
return false, err
}
if _, err := h.cryptoStream.Write(reply); err != nil {
return false, err
}
h.handshakeEvent <- struct{}{}
close(h.sentSHLO)
return true, nil
}
// We have an inchoate or non-matching CHLO, we now send a rejection
reply, err = h.handleInchoateCHLO(sni, chloData, cryptoData)
if err != nil {
return false, err
}
_, err = h.cryptoStream.Write(reply)
return false, err
}
// Open a message
func (h *cryptoSetupServer) Open(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) ([]byte, protocol.EncryptionLevel, error) {
h.mutex.RLock()
defer h.mutex.RUnlock()
if h.forwardSecureAEAD != nil {
res, err := h.forwardSecureAEAD.Open(dst, src, packetNumber, associatedData)
if err == nil {
if !h.receivedForwardSecurePacket { // this is the first forward secure packet we receive from the client
h.receivedForwardSecurePacket = true
// wait for the send on the handshakeEvent chan
<-h.sentSHLO
close(h.handshakeEvent)
}
return res, protocol.EncryptionForwardSecure, nil
}
if h.receivedForwardSecurePacket {
return nil, protocol.EncryptionUnspecified, err
}
}
if h.secureAEAD != nil {
res, err := h.secureAEAD.Open(dst, src, packetNumber, associatedData)
if err == nil {
h.receivedSecurePacket = true
return res, protocol.EncryptionSecure, nil
}
if h.receivedSecurePacket {
return nil, protocol.EncryptionUnspecified, err
}
}
res, err := h.nullAEAD.Open(dst, src, packetNumber, associatedData)
if err != nil {
return res, protocol.EncryptionUnspecified, err
}
return res, protocol.EncryptionUnencrypted, err
}
func (h *cryptoSetupServer) GetSealer() (protocol.EncryptionLevel, Sealer) {
h.mutex.RLock()
defer h.mutex.RUnlock()
if h.forwardSecureAEAD != nil {
return protocol.EncryptionForwardSecure, h.forwardSecureAEAD
}
return protocol.EncryptionUnencrypted, h.nullAEAD
}
func (h *cryptoSetupServer) GetSealerForCryptoStream() (protocol.EncryptionLevel, Sealer) {
h.mutex.RLock()
defer h.mutex.RUnlock()
if h.secureAEAD != nil {
return protocol.EncryptionSecure, h.secureAEAD
}
return protocol.EncryptionUnencrypted, h.nullAEAD
}
func (h *cryptoSetupServer) GetSealerWithEncryptionLevel(encLevel protocol.EncryptionLevel) (Sealer, error) {
h.mutex.RLock()
defer h.mutex.RUnlock()
switch encLevel {
case protocol.EncryptionUnencrypted:
return h.nullAEAD, nil
case protocol.EncryptionSecure:
if h.secureAEAD == nil {
return nil, errors.New("CryptoSetupServer: no secureAEAD")
}
return h.secureAEAD, nil
case protocol.EncryptionForwardSecure:
if h.forwardSecureAEAD == nil {
return nil, errors.New("CryptoSetupServer: no forwardSecureAEAD")
}
return h.forwardSecureAEAD, nil
}
return nil, errors.New("CryptoSetupServer: no encryption level specified")
}
func (h *cryptoSetupServer) isInchoateCHLO(cryptoData map[Tag][]byte, cert []byte) bool {
if _, ok := cryptoData[TagPUBS]; !ok {
return true
}
scid, ok := cryptoData[TagSCID]
if !ok || !bytes.Equal(h.scfg.ID, scid) {
return true
}
xlctTag, ok := cryptoData[TagXLCT]
if !ok || len(xlctTag) != 8 {
return true
}
xlct := binary.LittleEndian.Uint64(xlctTag)
if crypto.HashCert(cert) != xlct {
return true
}
return !h.acceptSTK(cryptoData[TagSTK])
}
func (h *cryptoSetupServer) acceptSTK(token []byte) bool {
stk, err := h.scfg.cookieGenerator.DecodeToken(token)
if err != nil {
utils.Debugf("STK invalid: %s", err.Error())
return false
}
return h.acceptSTKCallback(h.remoteAddr, stk)
}
func (h *cryptoSetupServer) handleInchoateCHLO(sni string, chlo []byte, cryptoData map[Tag][]byte) ([]byte, error) {
token, err := h.scfg.cookieGenerator.NewToken(h.remoteAddr)
if err != nil {
return nil, err
}
replyMap := map[Tag][]byte{
TagSCFG: h.scfg.Get(),
TagSTK: token,
TagSVID: []byte("quic-go"),
}
if h.acceptSTK(cryptoData[TagSTK]) {
proof, err := h.scfg.Sign(sni, chlo)
if err != nil {
return nil, err
}
commonSetHashes := cryptoData[TagCCS]
cachedCertsHashes := cryptoData[TagCCRT]
certCompressed, err := h.scfg.GetCertsCompressed(sni, commonSetHashes, cachedCertsHashes)
if err != nil {
return nil, err
}
// Token was valid, send more details
replyMap[TagPROF] = proof
replyMap[TagCERT] = certCompressed
}
message := HandshakeMessage{
Tag: TagREJ,
Data: replyMap,
}
var serverReply bytes.Buffer
message.Write(&serverReply)
utils.Debugf("Sending %s", message)
return serverReply.Bytes(), nil
}
func (h *cryptoSetupServer) handleCHLO(sni string, data []byte, cryptoData map[Tag][]byte) ([]byte, error) {
// We have a CHLO matching our server config, we can continue with the 0-RTT handshake
sharedSecret, err := h.scfg.kex.CalculateSharedKey(cryptoData[TagPUBS])
if err != nil {
return nil, err
}
h.mutex.Lock()
defer h.mutex.Unlock()
certUncompressed, err := h.scfg.certChain.GetLeafCert(sni)
if err != nil {
return nil, err
}
serverNonce := make([]byte, 32)
if _, err = rand.Read(serverNonce); err != nil {
return nil, err
}
h.diversificationNonce = make([]byte, 32)
if _, err = rand.Read(h.diversificationNonce); err != nil {
return nil, err
}
clientNonce := cryptoData[TagNONC]
err = h.validateClientNonce(clientNonce)
if err != nil {
return nil, err
}
aead := cryptoData[TagAEAD]
if !bytes.Equal(aead, []byte("AESG")) {
return nil, qerr.Error(qerr.CryptoNoSupport, "Unsupported AEAD or KEXS")
}
kexs := cryptoData[TagKEXS]
if !bytes.Equal(kexs, []byte("C255")) {
return nil, qerr.Error(qerr.CryptoNoSupport, "Unsupported AEAD or KEXS")
}
h.secureAEAD, err = h.keyDerivation(
false,
sharedSecret,
clientNonce,
h.connID,
data,
h.scfg.Get(),
certUncompressed,
h.diversificationNonce,
protocol.PerspectiveServer,
)
if err != nil {
return nil, err
}
h.handshakeEvent <- struct{}{}
// Generate a new curve instance to derive the forward secure key
var fsNonce bytes.Buffer
fsNonce.Write(clientNonce)
fsNonce.Write(serverNonce)
ephermalKex := h.keyExchange()
ephermalSharedSecret, err := ephermalKex.CalculateSharedKey(cryptoData[TagPUBS])
if err != nil {
return nil, err
}
h.forwardSecureAEAD, err = h.keyDerivation(
true,
ephermalSharedSecret,
fsNonce.Bytes(),
h.connID,
data,
h.scfg.Get(),
certUncompressed,
nil,
protocol.PerspectiveServer,
)
if err != nil {
return nil, err
}
replyMap := h.params.getHelloMap()
// add crypto parameters
verTag := &bytes.Buffer{}
for _, v := range protocol.GetGreasedVersions(h.supportedVersions) {
utils.BigEndian.WriteUint32(verTag, uint32(v))
}
replyMap[TagPUBS] = ephermalKex.PublicKey()
replyMap[TagSNO] = serverNonce
replyMap[TagVER] = verTag.Bytes()
// note that the SHLO *has* to fit into one packet
message := HandshakeMessage{
Tag: TagSHLO,
Data: replyMap,
}
var reply bytes.Buffer
message.Write(&reply)
utils.Debugf("Sending %s", message)
return reply.Bytes(), nil
}
// DiversificationNonce returns the diversification nonce
func (h *cryptoSetupServer) DiversificationNonce() []byte {
return h.diversificationNonce
}
func (h *cryptoSetupServer) SetDiversificationNonce(data []byte) {
panic("not needed for cryptoSetupServer")
}
func (h *cryptoSetupServer) validateClientNonce(nonce []byte) error {
if len(nonce) != 32 {
return qerr.Error(qerr.InvalidCryptoMessageParameter, "invalid client nonce length")
}
if !bytes.Equal(nonce[4:12], h.scfg.obit) {
return qerr.Error(qerr.InvalidCryptoMessageParameter, "OBIT not matching")
}
return nil
}
| 1 | 7,263 | This mutex is never used. Should it be, if users can now make calls into the crypto setup? | lucas-clemente-quic-go | go |
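The reviewer's question is whether the newly added `sync.RWMutex` should actually guard the struct's state now that outside callers can reach it. A minimal sketch of that pattern, with hypothetical names (`cryptoState`, `SetNonce`, and `Nonce` are illustrative, not quic-go's API):

```go
package handshake

import "sync"

// cryptoState is a stand-in for cryptoSetupServer: the mutex protects
// fields that are written by the handshake and read by outside callers.
type cryptoState struct {
	mutex                sync.RWMutex
	diversificationNonce []byte
}

// SetNonce takes the write lock because it mutates shared state.
func (c *cryptoState) SetNonce(n []byte) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.diversificationNonce = n
}

// Nonce takes the read lock so concurrent readers don't race with SetNonce.
func (c *cryptoState) Nonce() []byte {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	return c.diversificationNonce
}
```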
@@ -25,6 +25,9 @@ module Beaker
host.exec(Command.new("w32tm /config /manualpeerlist:#{NTPSERVER} /syncfromflags:manual /update"))
host.exec(Command.new("w32tm /resync"))
@logger.notify "NTP date succeeded on #{host}"
+ elsif host['platform'].include? 'sles'
+ host.exec(Command.new("sntp #{NTPSERVER}"))
+ @logger.notify "NTP date succeeded on #{host}"
else
success=false
try = 0 | 1 | module Beaker
module Utils
class NTPControl
NTPSERVER = 'pool.ntp.org'
SLEEPWAIT = 5
TRIES = 5
def initialize(options, hosts)
@options = options.dup
@hosts = hosts
@logger = options[:logger]
end
def timesync
@logger.notify "Update system time sync"
@logger.notify "run ntpdate against NTP pool systems"
@hosts.each do |host|
if host['platform'].include? 'solaris-10'
host.exec(Command.new("sleep 10 && ntpdate -w #{NTPSERVER}"))
@logger.notify "NTP date succeeded on #{host}"
elsif host['platform'].include? 'windows'
# The exit code of 5 is for Windows 2008 systems where the w32tm /register command
# is not actually necessary.
host.exec(Command.new("w32tm /register"), :acceptable_exit_codes => [0,5])
host.exec(Command.new("net start w32time"), :acceptable_exit_codes => [0,2])
host.exec(Command.new("w32tm /config /manualpeerlist:#{NTPSERVER} /syncfromflags:manual /update"))
host.exec(Command.new("w32tm /resync"))
@logger.notify "NTP date succeeded on #{host}"
else
success=false
try = 0
until try >= TRIES do
try += 1
if host.exec(Command.new("ntpdate -t 20 #{NTPSERVER}"), :acceptable_exit_codes => (0..255)).exit_code == 0
success=true
break
end
sleep SLEEPWAIT
end
if success
@logger.notify "NTP date succeeded on #{host} after #{try} tries"
else
raise "NTP date was not successful after #{try} tries"
end
end
end
rescue => e
report_and_raise(@logger, e, "timesync (--ntp)")
end
end
end
end
| 1 | 5,318 | Check here for sles-, and I'd prefer a regex since I don't know exactly what an 'include' is going to do. :) | voxpupuli-beaker | rb
@@ -65,7 +65,11 @@ class QR_Window(QWidget):
amount_text = ''
if amount:
if currency:
- self.amount = Decimal(amount) / self.exchanger.exchange(1, currency) if currency else amount
+ exch = self.exchanger.exchange(1, currency)
+ if exch == None:
+ self.amount = Decimal('0.0')
+ else:
+ self.amount = Decimal(amount) / exch if currency else amount
else:
self.amount = Decimal(amount)
self.amount = self.amount.quantize(Decimal('1.0000')) | 1 | import re
import platform
from decimal import Decimal
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
from electrum_gui.qt.qrcodewidget import QRCodeWidget
from electrum import bmp, pyqrnative, BasePlugin
from electrum.i18n import _
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
column_index = 4
class QR_Window(QWidget):
def __init__(self, exchanger):
QWidget.__init__(self)
self.exchanger = exchanger
self.setWindowTitle('Electrum - '+_('Invoice'))
self.setMinimumSize(800, 250)
self.address = ''
self.label = ''
self.amount = 0
self.setFocusPolicy(QtCore.Qt.NoFocus)
main_box = QHBoxLayout()
self.qrw = QRCodeWidget()
main_box.addWidget(self.qrw, 1)
vbox = QVBoxLayout()
main_box.addLayout(vbox)
self.address_label = QLabel("")
#self.address_label.setFont(QFont(MONOSPACE_FONT))
vbox.addWidget(self.address_label)
self.label_label = QLabel("")
vbox.addWidget(self.label_label)
self.amount_label = QLabel("")
vbox.addWidget(self.amount_label)
vbox.addStretch(1)
self.setLayout(main_box)
def set_content(self, addr, label, amount, currency):
self.address = addr
address_text = "<span style='font-size: 18pt'>%s</span>" % addr if addr else ""
self.address_label.setText(address_text)
if currency == 'BTC': currency = None
amount_text = ''
if amount:
if currency:
self.amount = Decimal(amount) / self.exchanger.exchange(1, currency) if currency else amount
else:
self.amount = Decimal(amount)
self.amount = self.amount.quantize(Decimal('1.0000'))
if currency:
amount_text += "<span style='font-size: 18pt'>%s %s</span><br/>" % (amount, currency)
amount_text += "<span style='font-size: 21pt'>%s</span> <span style='font-size: 16pt'>BTC</span> " % str(self.amount)
self.amount_label.setText(amount_text)
self.label = label
label_text = "<span style='font-size: 21pt'>%s</span>" % label if label else ""
self.label_label.setText(label_text)
msg = 'bitcoin:'+self.address
if self.amount is not None:
msg += '?amount=%s'%(str( self.amount))
if self.label is not None:
msg += '&label=%s'%(self.label)
elif self.label is not None:
msg += '?label=%s'%(self.label)
self.qrw.set_addr( msg )
class Plugin(BasePlugin):
def fullname(self):
return 'Point of Sale'
def description(self):
return _('Show QR code window and amounts requested for each address. Add menu item to request amount.')+_(' Note: This requires the exchange rate plugin to be installed.')
def init(self):
self.window = self.gui.main_window
self.wallet = self.window.wallet
self.qr_window = None
self.merchant_name = self.config.get('merchant_name', 'Invoice')
self.window.expert_mode = True
self.window.receive_list.setColumnCount(5)
self.window.receive_list.setHeaderLabels([ _('Address'), _('Label'), _('Balance'), _('Tx'), _('Request')])
self.requested_amounts = {}
self.toggle_QR_window(True)
def enable(self):
if not self.config.get('use_exchange_rate'):
self.gui.main_window.show_message("Please enable exchange rates first!")
return False
return BasePlugin.enable(self)
def load_wallet(self, wallet):
self.wallet = wallet
self.requested_amounts = self.wallet.storage.get('requested_amounts',{})
def close(self):
self.window.receive_list.setHeaderLabels([ _('Address'), _('Label'), _('Balance'), _('Tx')])
self.window.receive_list.setColumnCount(4)
for i,width in enumerate(self.window.column_widths['receive']):
self.window.receive_list.setColumnWidth(i, width)
self.toggle_QR_window(False)
def close_main_window(self):
if self.qr_window:
self.qr_window.close()
self.qr_window = None
def timer_actions(self):
if self.qr_window:
self.qr_window.qrw.update_qr()
def toggle_QR_window(self, show):
if show and not self.qr_window:
self.qr_window = QR_Window(self.gui.exchanger)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
item = self.window.receive_list.currentItem()
if item:
address = str(item.text(1))
label = self.wallet.labels.get(address)
amount, currency = self.requested_amounts.get(address, (None, None))
self.qr_window.set_content( address, label, amount, currency )
elif show and self.qr_window and not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
elif not show and self.qr_window and self.qr_window.isVisible():
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
def update_receive_item(self, address, item):
try:
amount, currency = self.requested_amounts.get(address, (None, None))
except Exception:
print "cannot get requested amount", address, self.requested_amounts.get(address)
amount, currency = None, None
self.requested_amounts.pop(address)
amount_str = amount + (' ' + currency if currency else '') if amount is not None else ''
item.setData(column_index,0,amount_str)
def current_item_changed(self, a):
if not self.wallet:
return
if a is not None and self.qr_window and self.qr_window.isVisible():
address = str(a.text(0))
label = self.wallet.labels.get(address)
try:
amount, currency = self.requested_amounts.get(address, (None, None))
except Exception:
amount, currency = None, None
self.qr_window.set_content( address, label, amount, currency )
def item_changed(self, item, column):
if column != column_index:
return
address = str( item.text(0) )
text = str( item.text(column) )
try:
seq = self.wallet.get_address_index(address)
index = seq[1][1]
except Exception:
print "cannot get index"
return
text = text.strip().upper()
#print text
m = re.match('^(\d*(|\.\d*))\s*(|BTC|EUR|USD|GBP|CNY|JPY|RUB|BRL)$', text)
if m and m.group(1) and m.group(1)!='.':
amount = m.group(1)
currency = m.group(3)
if not currency:
currency = 'BTC'
else:
currency = currency.upper()
self.requested_amounts[address] = (amount, currency)
self.wallet.storage.put('requested_amounts', self.requested_amounts, True)
label = self.wallet.labels.get(address)
if label is None:
label = self.merchant_name + ' - %04d'%(index+1)
self.wallet.labels[address] = label
if self.qr_window:
self.qr_window.set_content( address, label, amount, currency )
else:
item.setText(column,'')
if address in self.requested_amounts:
self.requested_amounts.pop(address)
self.window.update_receive_item(self.window.receive_list.currentItem())
def edit_amount(self):
l = self.window.receive_list
item = l.currentItem()
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
l.editItem( item, column_index )
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
def receive_menu(self, menu):
menu.addAction(_("Request amount"), self.edit_amount)
menu.addAction(_("Show Invoice"), lambda: self.toggle_QR_window(True))
| 1 | 10,642 | Should this be `None` instead? | spesmilo-electrum | py |
@@ -171,7 +171,14 @@ public final class ConfigUtil {
}
String cseKey = CONFIG_CSE_PREFIX + key.substring(key.indexOf(".") + 1);
- source.addProperty(cseKey, source.getProperty(key));
+ if (!source.containsKey(cseKey)) {
+ source.addProperty(cseKey, source.getProperty(key));
+ } else {
+ LOGGER
+ .warn(
+ "Key {} with an ambiguous item {} exists, please use the same prefix or will get unexpected merged value.",
+ key, cseKey);
+ }
}
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.config;
import static org.apache.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_CSE_PREFIX;
import static org.apache.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_KEY_SPLITER;
import static org.apache.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_SERVICECOMB_PREFIX;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.configuration.AbstractConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.EnvironmentConfiguration;
import org.apache.commons.configuration.SystemConfiguration;
import org.apache.servicecomb.config.archaius.scheduler.NeverStartPollingScheduler;
import org.apache.servicecomb.config.archaius.sources.ConfigModel;
import org.apache.servicecomb.config.archaius.sources.MicroserviceConfigLoader;
import org.apache.servicecomb.config.archaius.sources.MicroserviceConfigurationSource;
import org.apache.servicecomb.config.spi.ConfigCenterConfigurationSource;
import org.apache.servicecomb.foundation.common.utils.SPIServiceUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.config.ConcurrentCompositeConfiguration;
import com.netflix.config.ConcurrentMapConfiguration;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.DynamicConfiguration;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicWatchedConfiguration;
import com.netflix.config.WatchedUpdateListener;
import com.netflix.config.WatchedUpdateResult;
public final class ConfigUtil {
private static final Logger LOGGER = LoggerFactory.getLogger(ConfigUtil.class);
private static final String MICROSERVICE_CONFIG_LOADER_KEY = "cse-microservice-config-loader";
private static Map<String, Object> localConfig = new HashMap<>();
/**
* <p>The configurations not read by ServiceComb.</p>
* <p>
* For example, this map can store the configurations read by SpringBoot from application.properties,
* If users write the configurations of ServiceComb into application.yml instead of microservice.yaml,
* this can help {@link ConfigUtil} load config correctly.
* </p>
*/
private static final Map<String, Map<String, Object>> EXTRA_CONFIG_MAP = new LinkedHashMap<>();
private ConfigUtil() {
}
public static void setConfigs(Map<String, Object> config) {
localConfig = config;
}
public static void addConfig(String key, Object value) {
localConfig.put(key, value);
}
public static Object getProperty(String key) {
Object config = DynamicPropertyFactory.getBackingConfigurationSource();
return getProperty(config, key);
}
public static Object getProperty(Object config, String key) {
if (null != config && Configuration.class.isInstance(config)) {
Configuration configuration = (Configuration) config;
return configuration.getProperty(key);
}
return null;
}
private static void setMicroserviceConfigLoader(Configuration config, MicroserviceConfigLoader loader) {
config.setProperty(MICROSERVICE_CONFIG_LOADER_KEY, loader);
}
public static MicroserviceConfigLoader getMicroserviceConfigLoader() {
return (MicroserviceConfigLoader) getProperty(MICROSERVICE_CONFIG_LOADER_KEY);
}
public static MicroserviceConfigLoader getMicroserviceConfigLoader(Configuration config) {
return (MicroserviceConfigLoader) getProperty(config, MICROSERVICE_CONFIG_LOADER_KEY);
}
public static ConcurrentCompositeConfiguration createLocalConfig() {
MicroserviceConfigLoader loader = new MicroserviceConfigLoader();
loader.loadAndSort();
if (localConfig.size() > 0) {
ConfigModel model = new ConfigModel();
model.setConfig(localConfig);
loader.getConfigModels().add(model);
}
LOGGER.info("create local config:");
for (ConfigModel configModel : loader.getConfigModels()) {
LOGGER.info(" {}.", configModel.getUrl());
}
ConcurrentCompositeConfiguration config = ConfigUtil.createLocalConfig(loader.getConfigModels());
ConfigUtil.setMicroserviceConfigLoader(config, loader);
return config;
}
public static ConcurrentCompositeConfiguration createLocalConfig(List<ConfigModel> configModelList) {
ConcurrentCompositeConfiguration config = new ConcurrentCompositeConfiguration();
duplicateServiceCombConfigToCse(config,
new ConcurrentMapConfiguration(new SystemConfiguration()),
"configFromSystem");
duplicateServiceCombConfigToCse(config,
convertEnvVariable(new ConcurrentMapConfiguration(new EnvironmentConfiguration())),
"configFromEnvironment");
// If there is extra configurations, add it into config.
EXTRA_CONFIG_MAP.entrySet()
.stream()
.filter(mapEntry -> !mapEntry.getValue().isEmpty())
.forEachOrdered(configMapEntry -> duplicateServiceCombConfigToCse(config,
new ConcurrentMapConfiguration(configMapEntry.getValue()),
configMapEntry.getKey()));
duplicateServiceCombConfigToCse(config,
new DynamicConfiguration(
new MicroserviceConfigurationSource(configModelList), new NeverStartPollingScheduler()),
"configFromYamlFile");
duplicateServiceCombConfigToCseAtFront(config,
new ConcurrentMapConfiguration(ConfigMapping.getConvertedMap(config)),
"configFromMapping");
return config;
}
public static AbstractConfiguration convertEnvVariable(AbstractConfiguration source) {
Iterator<String> keys = source.getKeys();
while (keys.hasNext()) {
String key = keys.next();
String[] separatedKey = key.split(CONFIG_KEY_SPLITER);
if (separatedKey.length == 1) {
continue;
}
String newKey = String.join(".", separatedKey);
source.addProperty(newKey, source.getProperty(key));
}
return source;
}
//inject a copy of cse.xxx for servicecomb.xxx
private static void duplicateServiceCombConfigToCse(AbstractConfiguration source) {
Iterator<String> keys = source.getKeys();
while (keys.hasNext()) {
String key = keys.next();
if (!key.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
continue;
}
String cseKey = CONFIG_CSE_PREFIX + key.substring(key.indexOf(".") + 1);
source.addProperty(cseKey, source.getProperty(key));
}
}
private static void duplicateServiceCombConfigToCse(ConcurrentCompositeConfiguration compositeConfiguration,
AbstractConfiguration source,
String sourceName) {
duplicateServiceCombConfigToCse(source);
compositeConfiguration.addConfiguration(source, sourceName);
}
private static void duplicateServiceCombConfigToCseAtFront(ConcurrentCompositeConfiguration compositeConfiguration,
AbstractConfiguration source,
String sourceName) {
duplicateServiceCombConfigToCse(source);
compositeConfiguration.addConfigurationAtFront(source, sourceName);
}
private static ConfigCenterConfigurationSource createConfigCenterConfigurationSource(
Configuration localConfiguration) {
ConfigCenterConfigurationSource configCenterConfigurationSource =
SPIServiceUtils.getTargetService(ConfigCenterConfigurationSource.class);
if (null == configCenterConfigurationSource) {
LOGGER.info(
"config center SPI service can not find, skip to load configuration from config center");
return null;
}
if (!configCenterConfigurationSource.isValidSource(localConfiguration)) {
LOGGER.info("Config Source serverUri is not correctly configured.");
return null;
}
return configCenterConfigurationSource;
}
private static void createDynamicWatchedConfiguration(
ConcurrentCompositeConfiguration localConfiguration,
ConfigCenterConfigurationSource configCenterConfigurationSource) {
ConcurrentMapConfiguration injectConfig = new ConcurrentMapConfiguration();
localConfiguration.addConfigurationAtFront(injectConfig, "extraInjectConfig");
configCenterConfigurationSource.addUpdateListener(new ServiceCombPropertyUpdateListener(injectConfig));
DynamicWatchedConfiguration configFromConfigCenter =
new DynamicWatchedConfiguration(configCenterConfigurationSource);
duplicateServiceCombConfigToCse(configFromConfigCenter);
localConfiguration.addConfigurationAtFront(configFromConfigCenter, "configCenterConfig");
}
public static AbstractConfiguration createDynamicConfig() {
ConcurrentCompositeConfiguration compositeConfig = ConfigUtil.createLocalConfig();
ConfigCenterConfigurationSource configCenterConfigurationSource =
createConfigCenterConfigurationSource(compositeConfig);
if (configCenterConfigurationSource != null) {
createDynamicWatchedConfiguration(compositeConfig, configCenterConfigurationSource);
}
return compositeConfig;
}
public static void installDynamicConfig() {
if (ConfigurationManager.isConfigurationInstalled()) {
LOGGER.warn("Configuration installed by others, will ignore this configuration.");
return;
}
ConcurrentCompositeConfiguration compositeConfig = ConfigUtil.createLocalConfig();
ConfigCenterConfigurationSource configCenterConfigurationSource =
createConfigCenterConfigurationSource(compositeConfig);
if (configCenterConfigurationSource != null) {
createDynamicWatchedConfiguration(compositeConfig, configCenterConfigurationSource);
}
ConfigurationManager.install(compositeConfig);
if (configCenterConfigurationSource != null) {
configCenterConfigurationSource.init(compositeConfig);
}
}
public static void destroyConfigCenterConfigurationSource() {
SPIServiceUtils.getAllService(ConfigCenterConfigurationSource.class).forEach(source -> {
try {
source.destroy();
} catch (Throwable e) {
LOGGER.error("Failed to destroy {}", source.getClass().getName());
}
});
}
public static void addExtraConfig(String extraConfigName, Map<String, Object> extraConfig) {
EXTRA_CONFIG_MAP.put(extraConfigName, extraConfig);
}
public static void clearExtraConfig() {
EXTRA_CONFIG_MAP.clear();
}
private static class ServiceCombPropertyUpdateListener implements WatchedUpdateListener {
private final ConcurrentMapConfiguration injectConfig;
ServiceCombPropertyUpdateListener(ConcurrentMapConfiguration injectConfig) {
this.injectConfig = injectConfig;
}
@Override
public void updateConfiguration(WatchedUpdateResult watchedUpdateResult) {
Map<String, Object> adds = watchedUpdateResult.getAdded();
if (adds != null) {
for (String add : adds.keySet()) {
if (add.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
String key = CONFIG_CSE_PREFIX + add.substring(add.indexOf(".") + 1);
injectConfig.addProperty(key, adds.get(add));
}
}
}
Map<String, Object> deletes = watchedUpdateResult.getDeleted();
if (deletes != null) {
for (String delete : deletes.keySet()) {
if (delete.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
injectConfig.clearProperty(CONFIG_CSE_PREFIX + delete.substring(delete.indexOf(".") + 1));
}
}
}
Map<String, Object> changes = watchedUpdateResult.getChanged();
if (changes != null) {
for (String change : changes.keySet()) {
if (change.startsWith(CONFIG_SERVICECOMB_PREFIX)) {
String key = CONFIG_CSE_PREFIX + change.substring(change.indexOf(".") + 1);
injectConfig.setProperty(key, changes.get(change));
}
}
}
}
}
}
| 1 | 9,831 | Not sure the warning message is enough here; if the configuration is wrong, java-chassis may need to stop working instead of just sending out a warning to let the admin know about it. BTW, we may need a default override policy here. | apache-servicecomb-java-chassis | java
@@ -7,7 +7,7 @@ import (
func MustParse(args []string) []string {
parser := &Parser{
- After: []string{"server", "agent", "etcd-snapshot"},
+ After: []string{"server", "agent", "etcd-snapshot", "save", "delete", "list", "prune"},
FlagNames: []string{"--config", "-c"},
EnvName: version.ProgramUpper + "_CONFIG_FILE",
DefaultConfig: "/etc/rancher/" + version.Program + "/config.yaml", | 1 | package configfilearg
import (
"github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
)
func MustParse(args []string) []string {
parser := &Parser{
After: []string{"server", "agent", "etcd-snapshot"},
FlagNames: []string{"--config", "-c"},
EnvName: version.ProgramUpper + "_CONFIG_FILE",
DefaultConfig: "/etc/rancher/" + version.Program + "/config.yaml",
}
result, err := parser.Parse(args)
if err != nil {
logrus.Fatal(err)
}
return result
}
func MustFindString(args []string, target string) string {
parser := &Parser{
After: []string{},
FlagNames: []string{},
EnvName: version.ProgramUpper + "_CONFIG_FILE",
DefaultConfig: "/etc/rancher/" + version.Program + "/config.yaml",
}
result, err := parser.FindString(args, target)
if err != nil {
logrus.Fatal(err)
}
return result
}
| 1 | 10,458 | I see this getting unwieldy as we add more commands with subcommands. Can we perhaps enhance it to handle subcommands properly? Perhaps something like `"etcd-snapshot:1"`, which would indicate that the etcd-snapshot command may have 1 subcommand after it; if the next argument after it doesn't start with `--`, then the args should be appended after that subcommand. | k3s-io-k3s | go
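A minimal sketch of the `cmd:N` notation the reviewer proposes; `parseAfter` and `insertIndex` are hypothetical helpers under that assumption, not the actual k3s parser API:

```go
package configfilearg

import (
	"strconv"
	"strings"
)

// parseAfter splits an After entry like "etcd-snapshot:1" into the command
// name and how many subcommand arguments may follow it (0 when no suffix).
func parseAfter(entry string) (cmd string, subcommands int) {
	parts := strings.SplitN(entry, ":", 2)
	cmd = parts[0]
	if len(parts) == 2 {
		if n, err := strconv.Atoi(parts[1]); err == nil {
			subcommands = n
		}
	}
	return cmd, subcommands
}

// insertIndex returns where config-derived args should be appended: after the
// command at position `at`, plus up to `subcommands` following non-flag args.
func insertIndex(args []string, at, subcommands int) int {
	idx := at + 1
	for n := 0; n < subcommands && idx < len(args); n++ {
		if strings.HasPrefix(args[idx], "--") {
			break
		}
		idx++
	}
	return idx
}
```

With `After: []string{"etcd-snapshot:1"}`, an invocation like `k3s etcd-snapshot prune -c x` would then have the config-file args appended after `prune` rather than between `etcd-snapshot` and `prune`.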
@@ -249,13 +249,13 @@ public class ExecutionController extends EventHandler implements ExecutorManager
}
/**
- * Get all active (running, non-dispatched) flows from database. {@inheritDoc}
+ * Get all running (unfinished) flows from database. {@inheritDoc}
*/
@Override
public List<ExecutableFlow> getRunningFlows() {
final ArrayList<ExecutableFlow> flows = new ArrayList<>();
try {
- getActiveFlowHelper(flows, this.executorLoader.fetchUnfinishedFlows().values());
+ getFlowsHelper(flows, this.executorLoader.fetchUnfinishedFlows().values());
} catch (final ExecutorManagerException e) {
this.logger.error("Failed to get running flows.", e);
} | 1 | /*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an “AS IS” BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import azkaban.Constants.ConfigurationKeys;
import azkaban.event.EventHandler;
import azkaban.flow.FlowUtils;
import azkaban.project.Project;
import azkaban.project.ProjectWhitelist;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.Pair;
import azkaban.utils.Props;
import java.io.IOException;
import java.lang.Thread.State;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Controls flow executions on web server. This module implements the polling model
* in the new AZ dispatching design. It's injected only when azkaban.poll.model is configured to
* true. It will ultimately replace ExecutorManager in the future.
*/
@Singleton
public class ExecutionController extends EventHandler implements ExecutorManagerAdapter {
private static final Logger logger = LoggerFactory.getLogger(ExecutionController.class);
private static final Duration RECENTLY_FINISHED_LIFETIME = Duration.ofMinutes(10);
private static final int DEFAULT_MAX_ONCURRENT_RUNS_ONEFLOW = 30;
private final ExecutorLoader executorLoader;
private final ExecutorApiGateway apiGateway;
private final AlerterHolder alerterHolder;
private final int maxConcurrentRunsOneFlow;
@Inject
ExecutionController(final Props azkProps, final ExecutorLoader executorLoader,
final ExecutorApiGateway apiGateway, final AlerterHolder alerterHolder) {
this.executorLoader = executorLoader;
this.apiGateway = apiGateway;
this.alerterHolder = alerterHolder;
this.maxConcurrentRunsOneFlow = getMaxConcurrentRunsOneFlow(azkProps);
}
private int getMaxConcurrentRunsOneFlow(final Props azkProps) {
// The default threshold is set to 30 for now, in case some users are affected. We may
// decrease this number in future, to better prevent DDos attacks.
return azkProps.getInt(ConfigurationKeys.MAX_CONCURRENT_RUNS_ONEFLOW,
DEFAULT_MAX_ONCURRENT_RUNS_ONEFLOW);
}
@Override
public void setupExecutors() throws ExecutorManagerException {
// Todo: deprecate this method
}
@Override
public void disableQueueProcessorThread() {
// Todo: deprecate this method
}
@Override
public void enableQueueProcessorThread() {
// Todo: deprecate this method
}
@Override
public State getExecutorManagerThreadState() {
// Todo: deprecate this method
return State.RUNNABLE;
}
@Override
public boolean isExecutorManagerThreadActive() {
// Todo: deprecate this method
return true;
}
@Override
public long getLastExecutorManagerThreadCheckTime() {
// Todo: deprecate this method
return 1L;
}
@Override
public Collection<Executor> getAllActiveExecutors() {
List<Executor> executors = new ArrayList<>();
try {
executors = this.executorLoader.fetchActiveExecutors();
} catch (final ExecutorManagerException e) {
this.logger.error("Failed to get all active executors.", e);
}
return executors;
}
@Override
public Executor fetchExecutor(final int executorId) throws ExecutorManagerException {
return this.executorLoader.fetchExecutor(executorId);
}
@Override
public Set<String> getPrimaryServerHosts() {
final HashSet<String> ports = new HashSet<>();
try {
for (final Executor executor : this.executorLoader.fetchActiveExecutors()) {
ports.add(executor.getHost() + ":" + executor.getPort());
}
} catch (final ExecutorManagerException e) {
this.logger.error("Failed to get primary server hosts.", e);
}
return ports;
}
@Override
public Set<String> getAllActiveExecutorServerHosts() {
final Set<String> ports = getPrimaryServerHosts();
// include executor which were initially active and still has flows running
try {
for (final Pair<ExecutionReference, ExecutableFlow> running : this.executorLoader
.fetchActiveFlows().values()) {
final ExecutionReference ref = running.getFirst();
if (ref.getExecutor().isPresent()) {
final Executor executor = ref.getExecutor().get();
ports.add(executor.getHost() + ":" + executor.getPort());
}
}
} catch (final ExecutorManagerException e) {
this.logger.error("Failed to get all active executor server hosts.", e);
}
return ports;
}
/**
* Gets a list of all the unfinished (both dispatched and non-dispatched) executions for a
* given project and flow {@inheritDoc}.
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows(int, java.lang.String)
*/
@Override
public List<Integer> getRunningFlows(final int projectId, final String flowId) {
final List<Integer> executionIds = new ArrayList<>();
try {
executionIds.addAll(getRunningFlowsHelper(projectId, flowId,
this.executorLoader.fetchUnfinishedFlows().values()));
} catch (final ExecutorManagerException e) {
this.logger.error("Failed to get running flows for project " + projectId + ", flow "
+ flowId, e);
}
return executionIds;
}
/* Helper method for getRunningFlows */
private List<Integer> getRunningFlowsHelper(final int projectId, final String flowId,
final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
final List<Integer> executionIds = new ArrayList<>();
for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) {
if (ref.getSecond().getFlowId().equals(flowId)
&& ref.getSecond().getProjectId() == projectId) {
executionIds.add(ref.getFirst().getExecId());
}
}
return executionIds;
}
@Override
public List<Pair<ExecutableFlow, Optional<Executor>>> getActiveFlowsWithExecutor()
throws IOException {
final List<Pair<ExecutableFlow, Optional<Executor>>> flows = new ArrayList<>();
try {
getActiveFlowsWithExecutorHelper(flows, this.executorLoader.fetchUnfinishedFlows().values());
} catch (final ExecutorManagerException e) {
this.logger.error("Failed to get active flows with executor.", e);
}
return flows;
}
/* Helper method for getActiveFlowsWithExecutor */
private void getActiveFlowsWithExecutorHelper(
final List<Pair<ExecutableFlow, Optional<Executor>>> flows,
final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) {
flows.add(new Pair<>(ref.getSecond(), ref
.getFirst().getExecutor()));
}
}
/**
* Checks whether the given flow has an active (running, non-dispatched) execution from
* database. {@inheritDoc}
*/
@Override
public boolean isFlowRunning(final int projectId, final String flowId) {
boolean isRunning = false;
try {
isRunning = isFlowRunningHelper(projectId, flowId,
this.executorLoader.fetchUnfinishedFlows().values());
} catch (final ExecutorManagerException e) {
this.logger.error(
"Failed to check if the flow is running for project " + projectId + ", flow " + flowId,
e);
}
return isRunning;
}
/* Search a running flow in a collection */
private boolean isFlowRunningHelper(final int projectId, final String flowId,
final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) {
if (ref.getSecond().getProjectId() == projectId
&& ref.getSecond().getFlowId().equals(flowId)) {
return true;
}
}
return false;
}
/**
* Fetch ExecutableFlow from database. {@inheritDoc}
*/
@Override
public ExecutableFlow getExecutableFlow(final int execId)
throws ExecutorManagerException {
return this.executorLoader.fetchExecutableFlow(execId);
}
/**
* Get all active (running, non-dispatched) flows from database. {@inheritDoc}
*/
@Override
public List<ExecutableFlow> getRunningFlows() {
final ArrayList<ExecutableFlow> flows = new ArrayList<>();
try {
getActiveFlowHelper(flows, this.executorLoader.fetchUnfinishedFlows().values());
} catch (final ExecutorManagerException e) {
this.logger.error("Failed to get running flows.", e);
}
return flows;
}
/**
* Helper method to get all running flows from a Pair<ExecutionReference,
* ExecutableFlow collection
*/
private void getActiveFlowHelper(final ArrayList<ExecutableFlow> flows,
final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) {
flows.add(ref.getSecond());
}
}
@Override
public List<ExecutableFlow> getRecentlyFinishedFlows() {
List<ExecutableFlow> flows = new ArrayList<>();
try {
flows = this.executorLoader.fetchRecentlyFinishedFlows(
RECENTLY_FINISHED_LIFETIME);
} catch (final ExecutorManagerException e) {
logger.error("Failed to fetch recently finished flows.", e);
}
return flows;
}
@Override
public List<ExecutableFlow> getExecutableFlows(final int skip, final int size)
throws ExecutorManagerException {
final List<ExecutableFlow> flows = this.executorLoader.fetchFlowHistory(skip, size);
return flows;
}
@Override
public List<ExecutableFlow> getExecutableFlows(final String flowIdContains,
final int skip, final int size) throws ExecutorManagerException {
final List<ExecutableFlow> flows =
this.executorLoader.fetchFlowHistory(null, '%' + flowIdContains + '%', null,
0, -1, -1, skip, size);
return flows;
}
@Override
public List<ExecutableFlow> getExecutableFlows(final String projContain,
final String flowContain, final String userContain, final int status, final long begin,
final long end,
final int skip, final int size) throws ExecutorManagerException {
final List<ExecutableFlow> flows =
this.executorLoader.fetchFlowHistory(projContain, flowContain, userContain,
status, begin, end, skip, size);
return flows;
}
@Override
public List<ExecutableJobInfo> getExecutableJobs(final Project project,
final String jobId, final int skip, final int size) throws ExecutorManagerException {
final List<ExecutableJobInfo> nodes =
this.executorLoader.fetchJobHistory(project.getId(), jobId, skip, size);
return nodes;
}
@Override
public int getNumberOfJobExecutions(final Project project, final String jobId)
throws ExecutorManagerException {
return this.executorLoader.fetchNumExecutableNodes(project.getId(), jobId);
}
@Override
public LogData getExecutableFlowLog(final ExecutableFlow exFlow, final int offset,
final int length) throws ExecutorManagerException {
final Pair<ExecutionReference, ExecutableFlow> pair = this.executorLoader
.fetchActiveFlowByExecId(exFlow.getExecutionId());
if (pair != null) {
final Pair<String, String> typeParam = new Pair<>("type", "flow");
final Pair<String, String> offsetParam =
new Pair<>("offset", String.valueOf(offset));
final Pair<String, String> lengthParam =
new Pair<>("length", String.valueOf(length));
@SuppressWarnings("unchecked") final Map<String, Object> result =
this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.LOG_ACTION,
typeParam, offsetParam, lengthParam);
return LogData.createLogDataFromObject(result);
} else {
final LogData value =
this.executorLoader.fetchLogs(exFlow.getExecutionId(), "", 0, offset,
length);
return value;
}
}
@Override
public LogData getExecutionJobLog(final ExecutableFlow exFlow, final String jobId,
final int offset, final int length, final int attempt) throws ExecutorManagerException {
final Pair<ExecutionReference, ExecutableFlow> pair = this.executorLoader
.fetchActiveFlowByExecId(exFlow.getExecutionId());
if (pair != null) {
final Pair<String, String> typeParam = new Pair<>("type", "job");
final Pair<String, String> jobIdParam =
new Pair<>("jobId", jobId);
final Pair<String, String> offsetParam =
new Pair<>("offset", String.valueOf(offset));
final Pair<String, String> lengthParam =
new Pair<>("length", String.valueOf(length));
final Pair<String, String> attemptParam =
new Pair<>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked") final Map<String, Object> result =
this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.LOG_ACTION,
typeParam, jobIdParam, offsetParam, lengthParam, attemptParam);
return LogData.createLogDataFromObject(result);
} else {
final LogData value =
this.executorLoader.fetchLogs(exFlow.getExecutionId(), jobId, attempt,
offset, length);
return value;
}
}
@Override
public List<Object> getExecutionJobStats(final ExecutableFlow exFlow, final String jobId,
final int attempt) throws ExecutorManagerException {
final Pair<ExecutionReference, ExecutableFlow> pair =
this.executorLoader.fetchActiveFlowByExecId(exFlow.getExecutionId());
if (pair == null) {
return this.executorLoader.fetchAttachments(exFlow.getExecutionId(), jobId,
attempt);
}
final Pair<String, String> jobIdParam = new Pair<>("jobId", jobId);
final Pair<String, String> attemptParam =
new Pair<>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked") final Map<String, Object> result =
this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.ATTACHMENTS_ACTION,
jobIdParam, attemptParam);
@SuppressWarnings("unchecked") final List<Object> jobStats = (List<Object>) result
.get("attachments");
return jobStats;
}
@Override
public String getJobLinkUrl(final ExecutableFlow exFlow, final String jobId, final int attempt) {
// Todo: deprecate this method
return null;
}
/**
* If a flow is already dispatched to an executor, cancel by calling Executor. Else if it's still
* queued in DB, remove it from DB queue and finalize. {@inheritDoc}
*/
@Override
public void cancelFlow(final ExecutableFlow exFlow, final String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> unfinishedFlows = this.executorLoader
.fetchUnfinishedFlows();
if (unfinishedFlows.containsKey(exFlow.getExecutionId())) {
final Pair<ExecutionReference, ExecutableFlow> pair = unfinishedFlows
.get(exFlow.getExecutionId());
if (pair.getFirst().getExecutor().isPresent()) {
// Flow is already dispatched to an executor, so call that executor to cancel the flow.
this.apiGateway
.callWithReferenceByUser(pair.getFirst(), ConnectorParams.CANCEL_ACTION, userId);
} else {
// Flow is still queued, need to finalize it and update the status in DB.
ExecutionControllerUtils.finalizeFlow(this.executorLoader, this.alerterHolder, exFlow,
"Cancelled before dispatching to executor", null);
}
} else {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
}
}
@Override
public void resumeFlow(final ExecutableFlow exFlow, final String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
final Pair<ExecutionReference, ExecutableFlow> pair =
this.executorLoader.fetchActiveFlowByExecId(exFlow.getExecutionId());
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
this.apiGateway
.callWithReferenceByUser(pair.getFirst(), ConnectorParams.RESUME_ACTION, userId);
}
}
@Override
public void pauseFlow(final ExecutableFlow exFlow, final String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
final Pair<ExecutionReference, ExecutableFlow> pair =
this.executorLoader.fetchActiveFlowByExecId(exFlow.getExecutionId());
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
this.apiGateway
.callWithReferenceByUser(pair.getFirst(), ConnectorParams.PAUSE_ACTION, userId);
}
}
@Override
public void retryFailures(final ExecutableFlow exFlow, final String userId)
throws ExecutorManagerException {
modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RETRY_FAILURES, userId);
}
@SuppressWarnings("unchecked")
private Map<String, Object> modifyExecutingJobs(final ExecutableFlow exFlow,
final String command, final String userId, final String... jobIds)
throws ExecutorManagerException {
synchronized (exFlow) {
final Pair<ExecutionReference, ExecutableFlow> pair =
this.executorLoader.fetchActiveFlowByExecId(exFlow.getExecutionId());
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
final Map<String, Object> response;
if (jobIds != null && jobIds.length > 0) {
for (final String jobId : jobIds) {
if (!jobId.isEmpty()) {
final ExecutableNode node = exFlow.getExecutableNode(jobId);
if (node == null) {
throw new ExecutorManagerException("Job " + jobId
+ " doesn't exist in execution " + exFlow.getExecutionId()
+ ".");
}
}
}
final String ids = StringUtils.join(jobIds, ',');
response =
this.apiGateway.callWithReferenceByUser(pair.getFirst(),
ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
new Pair<>(
ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command),
new Pair<>(ConnectorParams.MODIFY_JOBS_LIST, ids));
} else {
response =
this.apiGateway.callWithReferenceByUser(pair.getFirst(),
ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
new Pair<>(
ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command));
}
return response;
}
}
/**
* When a flow is submitted, insert a new execution into the database queue. {@inheritDoc}
*/
@Override
public String submitExecutableFlow(final ExecutableFlow exflow, final String userId)
throws ExecutorManagerException {
final String exFlowKey = exflow.getProjectName() + "." + exflow.getId() + ".submitFlow";
// Use project and flow name to prevent race condition when same flow is submitted by API and
// schedule at the same time
// causing two same flow submission entering this piece.
synchronized (exFlowKey.intern()) {
final String flowId = exflow.getFlowId();
logger.info("Submitting execution flow " + flowId + " by " + userId);
String message = "";
final int projectId = exflow.getProjectId();
exflow.setSubmitUser(userId);
exflow.setSubmitTime(System.currentTimeMillis());
final List<Integer> running = getRunningFlows(projectId, flowId);
ExecutionOptions options = exflow.getExecutionOptions();
if (options == null) {
options = new ExecutionOptions();
}
if (options.getDisabledJobs() != null) {
FlowUtils.applyDisabledJobs(options.getDisabledJobs(), exflow);
}
if (!running.isEmpty()) {
if (running.size() > this.maxConcurrentRunsOneFlow) {
throw new ExecutorManagerException("Flow " + flowId
+ " has more than " + this.maxConcurrentRunsOneFlow + " concurrent runs. Skipping",
ExecutorManagerException.Reason.SkippedExecution);
} else if (options.getConcurrentOption().equals(
ExecutionOptions.CONCURRENT_OPTION_PIPELINE)) {
Collections.sort(running);
final Integer runningExecId = running.get(running.size() - 1);
options.setPipelineExecutionId(runningExecId);
message =
"Flow " + flowId + " is already running with exec id "
+ runningExecId + ". Pipelining level "
+ options.getPipelineLevel() + ". \n";
} else if (options.getConcurrentOption().equals(
ExecutionOptions.CONCURRENT_OPTION_SKIP)) {
throw new ExecutorManagerException("Flow " + flowId
+ " is already running. Skipping execution.",
ExecutorManagerException.Reason.SkippedExecution);
} else {
message =
"Flow " + flowId + " is already running with exec id "
+ StringUtils.join(running, ",")
+ ". Will execute concurrently. \n";
}
}
final boolean memoryCheck =
!ProjectWhitelist.isProjectWhitelisted(exflow.getProjectId(),
ProjectWhitelist.WhitelistType.MemoryCheck);
options.setMemoryCheck(memoryCheck);
// The exflow id is set by the loader. So it's unavailable until after
// this call.
this.executorLoader.uploadExecutableFlow(exflow);
message += "Execution queued successfully with exec id " + exflow.getExecutionId();
return message;
}
}
@Override
public Map<String, Object> callExecutorStats(final int executorId, final String action,
final Pair<String, String>... params) throws IOException, ExecutorManagerException {
final Executor executor = fetchExecutor(executorId);
final List<Pair<String, String>> paramList = new ArrayList<>();
if (params != null) {
paramList.addAll(Arrays.asList(params));
}
paramList.add(new Pair<>(ConnectorParams.ACTION_PARAM, action));
return this.apiGateway.callForJsonObjectMap(executor.getHost(), executor.getPort(),
"/stats", paramList);
}
@Override
public Map<String, Object> callExecutorJMX(final String hostPort, final String action,
final String mBean) throws IOException {
final List<Pair<String, String>> paramList =
new ArrayList<>();
paramList.add(new Pair<>(action, ""));
if (mBean != null) {
paramList.add(new Pair<>(ConnectorParams.JMX_MBEAN, mBean));
}
final String[] hostPortSplit = hostPort.split(":");
return this.apiGateway.callForJsonObjectMap(hostPortSplit[0],
Integer.valueOf(hostPortSplit[1]), "/jmx", paramList);
}
@Override
public void shutdown() {
    // TODO: shut down any thread that is still running
}
@Override
public int getExecutableFlows(final int projectId, final String flowId, final int from,
final int length, final List<ExecutableFlow> outputList)
throws ExecutorManagerException {
final List<ExecutableFlow> flows =
this.executorLoader.fetchFlowHistory(projectId, flowId, from, length);
outputList.addAll(flows);
return this.executorLoader.fetchNumExecutableFlows(projectId, flowId);
}
@Override
public List<ExecutableFlow> getExecutableFlows(final int projectId, final String flowId,
final int from, final int length, final Status status) throws ExecutorManagerException {
return this.executorLoader.fetchFlowHistory(projectId, flowId, from, length,
status);
}
}
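
The `submitExecutableFlow` method above reports its outcome by concatenating human-readable fragments into a single message string, which the review comment on this row questions. A minimal sketch of a structured alternative; `SubmissionResult` and its fields are hypothetical illustrations, not Azkaban API:

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical return type: callers get the exec id and any warnings as
// data instead of parsing a concatenated message string.
final class SubmissionResult {
    private final int executionId;
    private final List<String> warnings;

    SubmissionResult(int executionId, List<String> warnings) {
        this.executionId = executionId;
        // Defensive copy, exposed read-only.
        this.warnings = Collections.unmodifiableList(new ArrayList<>(warnings));
    }

    int getExecutionId() { return executionId; }

    List<String> getWarnings() { return warnings; }

    public static void main(String[] args) {
        SubmissionResult r = new SubmissionResult(42,
                List.of("Flow is already running with exec id 41. Pipelining level 1."));
        System.out.println("queued as " + r.getExecutionId()
                + ", warnings: " + r.getWarnings());
    }
}
```

Callers that still want the old free-form text can format it from the fields, while programmatic callers no longer have to parse prose.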
| 1 | 17,372 | why returning a string instead of a list? | azkaban-azkaban | java |
@@ -3,11 +3,13 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
+using Datadog.Trace.Configuration;
+
namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AdoNet.Npgsql
{
internal static class NpgsqlConstants
{
- public const string SqlCommandIntegrationName = "NpgsqlCommand";
+ public const string SqlCommandIntegrationName = nameof(IntegrationIds.AdoNet);
internal struct NpgsqlClientData : IAdoNetClientData
{ | 1 | // <copyright file="NpgsqlConstants.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AdoNet.Npgsql
{
internal static class NpgsqlConstants
{
public const string SqlCommandIntegrationName = "NpgsqlCommand";
internal struct NpgsqlClientData : IAdoNetClientData
{
public string IntegrationName => SqlCommandIntegrationName;
public string AssemblyName => "Npgsql";
public string SqlCommandType => "Npgsql.NpgsqlCommand";
public string MinimumVersion => "4.0.0";
public string MaximumVersion => "5.*.*";
public string DataReaderType => "Npgsql.NpgsqlDataReader";
public string DataReaderTaskType => "System.Threading.Tasks.Task`1<Npgsql.NpgsqlDataReader>";
}
}
}
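
The diff at the top of this row swaps the `"NpgsqlCommand"` literal for C#'s `nameof(IntegrationIds.AdoNet)`, tying the constant to an enum member so renames are compiler-checked. A rough Java analogue of the same keep-the-name-in-one-place pattern; this `IntegrationIds` enum is a stand-in, not the actual tracer type (and Java's `name()` yields the constant's identifier, e.g. `ADO_NET`, rather than C#'s `AdoNet` spelling):

```java
// Stand-in enum; the real tracer defines its own integration ids.
enum IntegrationIds {
    ADO_NET,
    NPGSQL
}

final class NpgsqlConstants {
    // Derive the name from the enum member instead of repeating a string
    // literal, so a rename is caught everywhere at compile time.
    static final String SQL_COMMAND_INTEGRATION_NAME = IntegrationIds.ADO_NET.name();

    private NpgsqlConstants() {}

    public static void main(String[] args) {
        System.out.println(SQL_COMMAND_INTEGRATION_NAME); // prints ADO_NET
    }
}
```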
| 1 | 22,189 | Was there a reason for changing all these integration names? Are they actually used for anything besides information in logs? | DataDog-dd-trace-dotnet | .cs |
@@ -422,6 +422,10 @@ public class FieldInfos implements Iterable<FieldInfo> {
}
}
+ synchronized Set<String> getFieldNames() {
+ return Set.copyOf(nameToNumber.keySet());
+ }
+
synchronized void clear() {
numberToName.clear();
nameToNumber.clear(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.lucene.util.ArrayUtil;
/**
* Collection of {@link FieldInfo}s (accessible by number or by name).
* @lucene.experimental
*/
public class FieldInfos implements Iterable<FieldInfo> {
/** An instance without any fields. */
public final static FieldInfos EMPTY = new FieldInfos(new FieldInfo[0]);
private final boolean hasFreq;
private final boolean hasProx;
private final boolean hasPayloads;
private final boolean hasOffsets;
private final boolean hasVectors;
private final boolean hasNorms;
private final boolean hasDocValues;
private final boolean hasPointValues;
private final String softDeletesField;
// used only by fieldInfo(int)
private final FieldInfo[] byNumber;
private final HashMap<String,FieldInfo> byName = new HashMap<>();
private final Collection<FieldInfo> values; // for an unmodifiable iterator
/**
* Constructs a new FieldInfos from an array of FieldInfo objects
*/
public FieldInfos(FieldInfo[] infos) {
boolean hasVectors = false;
boolean hasProx = false;
boolean hasPayloads = false;
boolean hasOffsets = false;
boolean hasFreq = false;
boolean hasNorms = false;
boolean hasDocValues = false;
boolean hasPointValues = false;
String softDeletesField = null;
int size = 0; // number of elements in byNumberTemp, number of used array slots
FieldInfo[] byNumberTemp = new FieldInfo[10]; // initial array capacity of 10
for (FieldInfo info : infos) {
if (info.number < 0) {
throw new IllegalArgumentException("illegal field number: " + info.number + " for field " + info.name);
}
size = info.number >= size ? info.number+1 : size;
if (info.number >= byNumberTemp.length){ //grow array
byNumberTemp = ArrayUtil.grow(byNumberTemp, info.number + 1);
}
FieldInfo previous = byNumberTemp[info.number];
if (previous != null) {
throw new IllegalArgumentException("duplicate field numbers: " + previous.name + " and " + info.name + " have: " + info.number);
}
byNumberTemp[info.number] = info;
previous = byName.put(info.name, info);
if (previous != null) {
throw new IllegalArgumentException("duplicate field names: " + previous.number + " and " + info.number + " have: " + info.name);
}
hasVectors |= info.hasVectors();
hasProx |= info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
hasFreq |= info.getIndexOptions() != IndexOptions.DOCS;
hasOffsets |= info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
hasNorms |= info.hasNorms();
hasDocValues |= info.getDocValuesType() != DocValuesType.NONE;
hasPayloads |= info.hasPayloads();
hasPointValues |= (info.getPointDataDimensionCount() != 0);
if (info.isSoftDeletesField()) {
if (softDeletesField != null && softDeletesField.equals(info.name) == false) {
throw new IllegalArgumentException("multiple soft-deletes fields [" + info.name + ", " + softDeletesField + "]");
}
softDeletesField = info.name;
}
}
this.hasVectors = hasVectors;
this.hasProx = hasProx;
this.hasPayloads = hasPayloads;
this.hasOffsets = hasOffsets;
this.hasFreq = hasFreq;
this.hasNorms = hasNorms;
this.hasDocValues = hasDocValues;
this.hasPointValues = hasPointValues;
this.softDeletesField = softDeletesField;
List<FieldInfo> valuesTemp = new ArrayList<>();
byNumber = new FieldInfo[size];
for(int i=0; i<size; i++){
byNumber[i] = byNumberTemp[i];
if (byNumberTemp[i] != null) {
valuesTemp.add(byNumberTemp[i]);
}
}
values = Collections.unmodifiableCollection(Arrays.asList(valuesTemp.toArray(new FieldInfo[0])));
}
/** Call this to get the (merged) FieldInfos for a
* composite reader.
* <p>
* NOTE: the returned field numbers will likely not
* correspond to the actual field numbers in the underlying
   * readers, and codec metadata ({@link FieldInfo#getAttribute(String)})
* will be unavailable.
*/
public static FieldInfos getMergedFieldInfos(IndexReader reader) {
final List<LeafReaderContext> leaves = reader.leaves();
if (leaves.isEmpty()) {
return FieldInfos.EMPTY;
} else if (leaves.size() == 1) {
return leaves.get(0).reader().getFieldInfos();
} else {
final String softDeletesField = leaves.stream()
.map(l -> l.reader().getFieldInfos().getSoftDeletesField())
.filter(Objects::nonNull)
.findAny().orElse(null);
final Builder builder = new Builder(new FieldNumbers(softDeletesField));
for (final LeafReaderContext ctx : leaves) {
builder.add(ctx.reader().getFieldInfos());
}
return builder.finish();
}
}
/** Returns a set of names of fields that have a terms index. The order is undefined. */
public static Collection<String> getIndexedFields(IndexReader reader) {
return reader.leaves().stream()
.flatMap(l -> StreamSupport.stream(l.reader().getFieldInfos().spliterator(), false)
.filter(fi -> fi.getIndexOptions() != IndexOptions.NONE))
.map(fi -> fi.name)
.collect(Collectors.toSet());
}
/** Returns true if any fields have freqs */
public boolean hasFreq() {
return hasFreq;
}
/** Returns true if any fields have positions */
public boolean hasProx() {
return hasProx;
}
/** Returns true if any fields have payloads */
public boolean hasPayloads() {
return hasPayloads;
}
/** Returns true if any fields have offsets */
public boolean hasOffsets() {
return hasOffsets;
}
/** Returns true if any fields have vectors */
public boolean hasVectors() {
return hasVectors;
}
/** Returns true if any fields have norms */
public boolean hasNorms() {
return hasNorms;
}
/** Returns true if any fields have DocValues */
public boolean hasDocValues() {
return hasDocValues;
}
/** Returns true if any fields have PointValues */
public boolean hasPointValues() {
return hasPointValues;
}
/** Returns the soft-deletes field name if exists; otherwise returns null */
public String getSoftDeletesField() {
return softDeletesField;
}
/** Returns the number of fields */
public int size() {
return byName.size();
}
/**
* Returns an iterator over all the fieldinfo objects present,
* ordered by ascending field number
*/
// TODO: what happens if in fact a different order is used?
@Override
public Iterator<FieldInfo> iterator() {
return values.iterator();
}
/**
* Return the fieldinfo object referenced by the field name
* @return the FieldInfo object or null when the given fieldName
* doesn't exist.
*/
public FieldInfo fieldInfo(String fieldName) {
return byName.get(fieldName);
}
/**
* Return the fieldinfo object referenced by the fieldNumber.
* @param fieldNumber field's number.
* @return the FieldInfo object or null when the given fieldNumber
* doesn't exist.
* @throws IllegalArgumentException if fieldNumber is negative
*/
public FieldInfo fieldInfo(int fieldNumber) {
if (fieldNumber < 0) {
throw new IllegalArgumentException("Illegal field number: " + fieldNumber);
}
if (fieldNumber >= byNumber.length) {
return null;
}
return byNumber[fieldNumber];
}
static final class FieldDimensions {
public final int dataDimensionCount;
public final int indexDimensionCount;
public final int dimensionNumBytes;
public FieldDimensions(int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes) {
this.dataDimensionCount = dataDimensionCount;
this.indexDimensionCount = indexDimensionCount;
this.dimensionNumBytes = dimensionNumBytes;
}
}
static final class FieldNumbers {
private final Map<Integer,String> numberToName;
private final Map<String,Integer> nameToNumber;
private final Map<String, IndexOptions> indexOptions;
// We use this to enforce that a given field never
// changes DV type, even across segments / IndexWriter
// sessions:
private final Map<String,DocValuesType> docValuesType;
private final Map<String,FieldDimensions> dimensions;
// TODO: we should similarly catch an attempt to turn
    // norms back on after they were already omitted; today
// we silently discard the norm but this is badly trappy
private int lowestUnassignedFieldNumber = -1;
// The soft-deletes field from IWC to enforce a single soft-deletes field
private final String softDeletesFieldName;
FieldNumbers(String softDeletesFieldName) {
this.nameToNumber = new HashMap<>();
this.numberToName = new HashMap<>();
this.indexOptions = new HashMap<>();
this.docValuesType = new HashMap<>();
this.dimensions = new HashMap<>();
this.softDeletesFieldName = softDeletesFieldName;
}
/**
* Returns the global field number for the given field name. If the name
* does not exist yet it tries to add it with the given preferred field
* number assigned if possible otherwise the first unassigned field number
* is used as the field number.
*/
synchronized int addOrGet(String fieldName, int preferredFieldNumber, IndexOptions indexOptions, DocValuesType dvType, int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes, boolean isSoftDeletesField) {
if (indexOptions != IndexOptions.NONE) {
IndexOptions currentOpts = this.indexOptions.get(fieldName);
if (currentOpts == null) {
this.indexOptions.put(fieldName, indexOptions);
} else if (currentOpts != IndexOptions.NONE && currentOpts != indexOptions) {
throw new IllegalArgumentException("cannot change field \"" + fieldName + "\" from index options=" + currentOpts + " to inconsistent index options=" + indexOptions);
}
}
if (dvType != DocValuesType.NONE) {
DocValuesType currentDVType = docValuesType.get(fieldName);
if (currentDVType == null) {
docValuesType.put(fieldName, dvType);
} else if (currentDVType != DocValuesType.NONE && currentDVType != dvType) {
throw new IllegalArgumentException("cannot change DocValues type from " + currentDVType + " to " + dvType + " for field \"" + fieldName + "\"");
}
}
if (dataDimensionCount != 0) {
FieldDimensions dims = dimensions.get(fieldName);
if (dims != null) {
if (dims.dataDimensionCount != dataDimensionCount) {
throw new IllegalArgumentException("cannot change point data dimension count from " + dims.dataDimensionCount + " to " + dataDimensionCount + " for field=\"" + fieldName + "\"");
}
if (dims.indexDimensionCount != indexDimensionCount) {
throw new IllegalArgumentException("cannot change point index dimension count from " + dims.indexDimensionCount + " to " + indexDimensionCount + " for field=\"" + fieldName + "\"");
}
if (dims.dimensionNumBytes != dimensionNumBytes) {
throw new IllegalArgumentException("cannot change point numBytes from " + dims.dimensionNumBytes + " to " + dimensionNumBytes + " for field=\"" + fieldName + "\"");
}
} else {
dimensions.put(fieldName, new FieldDimensions(dataDimensionCount, indexDimensionCount, dimensionNumBytes));
}
}
Integer fieldNumber = nameToNumber.get(fieldName);
if (fieldNumber == null) {
final Integer preferredBoxed = Integer.valueOf(preferredFieldNumber);
if (preferredFieldNumber != -1 && !numberToName.containsKey(preferredBoxed)) {
// cool - we can use this number globally
fieldNumber = preferredBoxed;
} else {
// find a new FieldNumber
while (numberToName.containsKey(++lowestUnassignedFieldNumber)) {
          // might not be up to date - let's do the work once needed
}
fieldNumber = lowestUnassignedFieldNumber;
}
assert fieldNumber >= 0;
numberToName.put(fieldNumber, fieldName);
nameToNumber.put(fieldName, fieldNumber);
}
if (isSoftDeletesField) {
if (softDeletesFieldName == null) {
throw new IllegalArgumentException("this index has [" + fieldName + "] as soft-deletes already but soft-deletes field is not configured in IWC");
} else if (fieldName.equals(softDeletesFieldName) == false) {
throw new IllegalArgumentException("cannot configure [" + softDeletesFieldName + "] as soft-deletes; this index uses [" + fieldName + "] as soft-deletes already");
}
} else if (fieldName.equals(softDeletesFieldName)) {
throw new IllegalArgumentException("cannot configure [" + softDeletesFieldName + "] as soft-deletes; this index uses [" + fieldName + "] as non-soft-deletes already");
}
return fieldNumber.intValue();
}
synchronized void verifyConsistent(Integer number, String name, IndexOptions indexOptions) {
if (name.equals(numberToName.get(number)) == false) {
throw new IllegalArgumentException("field number " + number + " is already mapped to field name \"" + numberToName.get(number) + "\", not \"" + name + "\"");
}
if (number.equals(nameToNumber.get(name)) == false) {
throw new IllegalArgumentException("field name \"" + name + "\" is already mapped to field number \"" + nameToNumber.get(name) + "\", not \"" + number + "\"");
}
IndexOptions currentIndexOptions = this.indexOptions.get(name);
if (indexOptions != IndexOptions.NONE && currentIndexOptions != null && currentIndexOptions != IndexOptions.NONE && indexOptions != currentIndexOptions) {
throw new IllegalArgumentException("cannot change field \"" + name + "\" from index options=" + currentIndexOptions + " to inconsistent index options=" + indexOptions);
}
}
synchronized void verifyConsistent(Integer number, String name, DocValuesType dvType) {
if (name.equals(numberToName.get(number)) == false) {
throw new IllegalArgumentException("field number " + number + " is already mapped to field name \"" + numberToName.get(number) + "\", not \"" + name + "\"");
}
if (number.equals(nameToNumber.get(name)) == false) {
throw new IllegalArgumentException("field name \"" + name + "\" is already mapped to field number \"" + nameToNumber.get(name) + "\", not \"" + number + "\"");
}
DocValuesType currentDVType = docValuesType.get(name);
if (dvType != DocValuesType.NONE && currentDVType != null && currentDVType != DocValuesType.NONE && dvType != currentDVType) {
throw new IllegalArgumentException("cannot change DocValues type from " + currentDVType + " to " + dvType + " for field \"" + name + "\"");
}
}
synchronized void verifyConsistentDimensions(Integer number, String name, int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes) {
if (name.equals(numberToName.get(number)) == false) {
throw new IllegalArgumentException("field number " + number + " is already mapped to field name \"" + numberToName.get(number) + "\", not \"" + name + "\"");
}
if (number.equals(nameToNumber.get(name)) == false) {
throw new IllegalArgumentException("field name \"" + name + "\" is already mapped to field number \"" + nameToNumber.get(name) + "\", not \"" + number + "\"");
}
FieldDimensions dim = dimensions.get(name);
if (dim != null) {
if (dim.dataDimensionCount != dataDimensionCount) {
throw new IllegalArgumentException("cannot change point data dimension count from " + dim.dataDimensionCount + " to " + dataDimensionCount + " for field=\"" + name + "\"");
}
if (dim.indexDimensionCount != indexDimensionCount) {
throw new IllegalArgumentException("cannot change point index dimension count from " + dim.indexDimensionCount + " to " + indexDimensionCount + " for field=\"" + name + "\"");
}
if (dim.dimensionNumBytes != dimensionNumBytes) {
throw new IllegalArgumentException("cannot change point numBytes from " + dim.dimensionNumBytes + " to " + dimensionNumBytes + " for field=\"" + name + "\"");
}
}
}
/**
* Returns true if the {@code fieldName} exists in the map and is of the
* same {@code dvType}.
*/
synchronized boolean contains(String fieldName, DocValuesType dvType) {
// used by IndexWriter.updateNumericDocValue
if (!nameToNumber.containsKey(fieldName)) {
return false;
} else {
// only return true if the field has the same dvType as the requested one
return dvType == docValuesType.get(fieldName);
}
}
synchronized void clear() {
numberToName.clear();
nameToNumber.clear();
indexOptions.clear();
docValuesType.clear();
dimensions.clear();
}
synchronized void setIndexOptions(int number, String name, IndexOptions indexOptions) {
verifyConsistent(number, name, indexOptions);
this.indexOptions.put(name, indexOptions);
}
synchronized void setDocValuesType(int number, String name, DocValuesType dvType) {
verifyConsistent(number, name, dvType);
docValuesType.put(name, dvType);
}
synchronized void setDimensions(int number, String name, int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes) {
if (dimensionNumBytes > PointValues.MAX_NUM_BYTES) {
throw new IllegalArgumentException("dimension numBytes must be <= PointValues.MAX_NUM_BYTES (= " + PointValues.MAX_NUM_BYTES + "); got " + dimensionNumBytes + " for field=\"" + name + "\"");
}
if (dataDimensionCount > PointValues.MAX_DIMENSIONS) {
throw new IllegalArgumentException("pointDataDimensionCount must be <= PointValues.MAX_DIMENSIONS (= " + PointValues.MAX_DIMENSIONS + "); got " + dataDimensionCount + " for field=\"" + name + "\"");
}
if (indexDimensionCount > dataDimensionCount) {
throw new IllegalArgumentException("pointIndexDimensionCount must be <= pointDataDimensionCount (= " + dataDimensionCount + "); got " + indexDimensionCount + " for field=\"" + name + "\"");
}
verifyConsistentDimensions(number, name, dataDimensionCount, indexDimensionCount, dimensionNumBytes);
dimensions.put(name, new FieldDimensions(dataDimensionCount, indexDimensionCount, dimensionNumBytes));
}
}
static final class Builder {
private final HashMap<String,FieldInfo> byName = new HashMap<>();
final FieldNumbers globalFieldNumbers;
private boolean finished;
/**
* Creates a new instance with the given {@link FieldNumbers}.
*/
Builder(FieldNumbers globalFieldNumbers) {
assert globalFieldNumbers != null;
this.globalFieldNumbers = globalFieldNumbers;
}
public void add(FieldInfos other) {
assert assertNotFinished();
for(FieldInfo fieldInfo : other){
add(fieldInfo);
}
}
/** Create a new field, or return existing one. */
public FieldInfo getOrAdd(String name) {
FieldInfo fi = fieldInfo(name);
if (fi == null) {
assert assertNotFinished();
// This field wasn't yet added to this in-RAM
// segment's FieldInfo, so now we get a global
// number for this field. If the field was seen
// before then we'll get the same name and number,
// else we'll allocate a new one:
final boolean isSoftDeletesField = name.equals(globalFieldNumbers.softDeletesFieldName);
final int fieldNumber = globalFieldNumbers.addOrGet(name, -1, IndexOptions.NONE, DocValuesType.NONE, 0, 0, 0, isSoftDeletesField);
fi = new FieldInfo(name, fieldNumber, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, new HashMap<>(), 0, 0, 0, isSoftDeletesField);
assert !byName.containsKey(fi.name);
globalFieldNumbers.verifyConsistent(Integer.valueOf(fi.number), fi.name, DocValuesType.NONE);
byName.put(fi.name, fi);
}
return fi;
}
private FieldInfo addOrUpdateInternal(String name, int preferredFieldNumber,
boolean storeTermVector,
boolean omitNorms, boolean storePayloads, IndexOptions indexOptions,
DocValuesType docValues, long dvGen,
Map<String, String> attributes,
int dataDimensionCount, int indexDimensionCount, int dimensionNumBytes,
boolean isSoftDeletesField) {
assert assertNotFinished();
if (docValues == null) {
throw new NullPointerException("DocValuesType must not be null");
}
if (attributes != null) {
        // the original attributes map is unmodifiable, so take a mutable copy
attributes = new HashMap<>(attributes);
}
FieldInfo fi = fieldInfo(name);
if (fi == null) {
// This field wasn't yet added to this in-RAM
// segment's FieldInfo, so now we get a global
// number for this field. If the field was seen
// before then we'll get the same name and number,
// else we'll allocate a new one:
final int fieldNumber = globalFieldNumbers.addOrGet(name, preferredFieldNumber, indexOptions, docValues, dataDimensionCount, indexDimensionCount, dimensionNumBytes, isSoftDeletesField);
fi = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads, indexOptions, docValues, dvGen, attributes, dataDimensionCount, indexDimensionCount, dimensionNumBytes, isSoftDeletesField);
assert !byName.containsKey(fi.name);
globalFieldNumbers.verifyConsistent(Integer.valueOf(fi.number), fi.name, fi.getDocValuesType());
byName.put(fi.name, fi);
} else {
fi.update(storeTermVector, omitNorms, storePayloads, indexOptions, attributes, dataDimensionCount, indexDimensionCount, dimensionNumBytes);
if (docValues != DocValuesType.NONE) {
// Only pay the synchronization cost if fi does not already have a DVType
boolean updateGlobal = fi.getDocValuesType() == DocValuesType.NONE;
if (updateGlobal) {
// Must also update docValuesType map so it's
// aware of this field's DocValuesType. This will throw IllegalArgumentException if
// an illegal type change was attempted.
globalFieldNumbers.setDocValuesType(fi.number, name, docValues);
}
fi.setDocValuesType(docValues); // this will also perform the consistency check.
fi.setDocValuesGen(dvGen);
}
}
return fi;
}
public FieldInfo add(FieldInfo fi) {
return add(fi, -1);
}
public FieldInfo add(FieldInfo fi, long dvGen) {
// IMPORTANT - reuse the field number if possible for consistent field numbers across segments
return addOrUpdateInternal(fi.name, fi.number, fi.hasVectors(),
fi.omitsNorms(), fi.hasPayloads(),
fi.getIndexOptions(), fi.getDocValuesType(), dvGen,
fi.attributes(),
fi.getPointDataDimensionCount(), fi.getPointIndexDimensionCount(), fi.getPointNumBytes(),
fi.isSoftDeletesField());
}
public FieldInfo fieldInfo(String fieldName) {
return byName.get(fieldName);
}
/** Called only from assert */
private boolean assertNotFinished() {
if (finished) {
throw new IllegalStateException("FieldInfos.Builder was already finished; cannot add new fields");
}
return true;
}
FieldInfos finish() {
finished = true;
return new FieldInfos(byName.values().toArray(new FieldInfo[byName.size()]));
}
}
}
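
The diff at the top of this row adds `getFieldNames()` returning `Set.copyOf(nameToNumber.keySet())`; the review comment objects to resurrecting the method, not to how it copies. The copy itself is the interesting part: returning the live `keySet()` view would leak `FieldNumbers`' internal state outside its `synchronized` methods. A self-contained sketch of that snapshot idiom, assuming Java 10+ for `Set.copyOf`; `FieldNameRegistry` is an invented stand-in, not a Lucene class:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Minimal registry mirroring the FieldNumbers idiom: all access to the
// map is synchronized, and readers get an immutable snapshot, never the
// live keySet() view backed by the map.
final class FieldNameRegistry {
    private final Map<String, Integer> nameToNumber = new HashMap<>();

    synchronized int addOrGet(String name) {
        return nameToNumber.computeIfAbsent(name, n -> nameToNumber.size());
    }

    synchronized Set<String> getFieldNames() {
        // Set.copyOf makes an immutable snapshot; later mutations of the
        // map cannot leak into (or race with) the caller's iteration.
        return Set.copyOf(nameToNumber.keySet());
    }

    public static void main(String[] args) {
        FieldNameRegistry reg = new FieldNameRegistry();
        reg.addOrGet("title");
        Set<String> snapshot = reg.getFieldNames();
        reg.addOrGet("body"); // does not affect the snapshot
        System.out.println(snapshot); // [title]
    }
}
```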
| 1 | 29,851 | we should not resurrect this method that we just removed | apache-lucene-solr | java |
@@ -837,7 +837,18 @@ class WebDriver(object):
del png
return True
- save_screenshot = get_screenshot_as_file
+ def save_screenshot(self, filename):
+ """
+ Gets the screenshot of the current window. Returns False if there is
+ any IOError, else returns True. Use full paths in your filename.
+
+ :Args:
+ - filename: The full path you wish to save your screenshot to.
+
+ :Usage:
+ driver.save_screenshot('/Screenshots/foo.png')
+ """
+ return self.get_screenshot_as_file(self, filename)
def get_screenshot_as_png(self):
""" | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The WebDriver implementation."""
import base64
import warnings
from contextlib import contextmanager
from .command import Command
from .webelement import WebElement
from .remote_connection import RemoteConnection
from .errorhandler import ErrorHandler
from .switch_to import SwitchTo
from .mobile import Mobile
from .file_detector import FileDetector, LocalFileDetector
from selenium.common.exceptions import (InvalidArgumentException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.html5.application_cache import ApplicationCache
try:
str = basestring
except NameError:
pass
class WebDriver(object):
"""
Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol
as defined at
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol
:Attributes:
- session_id - String ID of the browser session started and controlled by this WebDriver.
     - capabilities - Dictionary of effective capabilities of this browser session as returned
by the remote server. See https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities
- command_executor - remote_connection.RemoteConnection object used to execute commands.
- error_handler - errorhandler.ErrorHandler object used to handle errors.
"""
_web_element_cls = WebElement
def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub',
desired_capabilities=None, browser_profile=None, proxy=None,
keep_alive=False, file_detector=None):
"""
Create a new driver that will issue commands using the wire protocol.
:Args:
- command_executor - Either a string representing URL of the remote server or a custom
remote_connection.RemoteConnection object. Defaults to 'http://127.0.0.1:4444/wd/hub'.
- desired_capabilities - A dictionary of capabilities to request when
starting the browser session. Required parameter.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object.
Only used if Firefox is requested. Optional.
- proxy - A selenium.webdriver.common.proxy.Proxy object. The browser session will
be started with given proxy settings, if possible. Optional.
- keep_alive - Whether to configure remote_connection.RemoteConnection to use
HTTP keep-alive. Defaults to False.
- file_detector - Pass custom file detector object during instantiation. If None,
then default LocalFileDetector() will be used.
"""
if desired_capabilities is None:
raise WebDriverException("Desired Capabilities can't be None")
if not isinstance(desired_capabilities, dict):
raise WebDriverException("Desired Capabilities must be a dictionary")
if proxy is not None:
warnings.warn("Please use FirefoxOptions to set proxy",
DeprecationWarning)
proxy.add_to_capabilities(desired_capabilities)
self.command_executor = command_executor
if type(self.command_executor) is bytes or isinstance(self.command_executor, str):
self.command_executor = RemoteConnection(command_executor, keep_alive=keep_alive)
self._is_remote = True
self.session_id = None
self.capabilities = {}
self.error_handler = ErrorHandler()
self.start_client()
if browser_profile is not None:
warnings.warn("Please use FirefoxOptions to set browser profile",
DeprecationWarning)
self.start_session(desired_capabilities, browser_profile)
self._switch_to = SwitchTo(self)
self._mobile = Mobile(self)
self.file_detector = file_detector or LocalFileDetector()
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(
type(self), self.session_id)
@contextmanager
def file_detector_context(self, file_detector_class, *args, **kwargs):
"""
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
"""
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector is not None:
self.file_detector = last_detector
@property
def mobile(self):
return self._mobile
@property
def name(self):
"""Returns the name of the underlying browser for this instance.
:Usage:
- driver.name
"""
if 'browserName' in self.capabilities:
return self.capabilities['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""
Called before starting a new session. This method may be overridden
to define custom startup behavior.
"""
pass
def stop_client(self):
"""
Called after executing a quit command. This method may be overridden
to define custom shutdown behavior.
"""
pass
def start_session(self, capabilities, browser_profile=None):
"""
Creates a new session with the desired capabilities.
:Args:
- browser_name - The name of the browser to request.
- version - Which browser version to request.
- platform - Which platform to request the browser on.
- javascript_enabled - Whether the new session should support JavaScript.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if not isinstance(capabilities, dict):
raise InvalidArgumentException("Capabilities must be a dictionary")
w3c_caps = {"firstMatch": [], "alwaysMatch": {}}
w3c_caps.update(capabilities)
if browser_profile:
w3c_caps["firstMatch"].append({"firefox_profile": browser_profile.encoded})
parameters = {"capabilities": w3c_caps,
"desiredCapabilities": capabilities}
response = self.execute(Command.NEW_SESSION, parameters)
if 'sessionId' not in response:
response = response['value']
self.session_id = response['sessionId']
self.capabilities = response.get('value')
# if capabilities is none we are probably speaking to
# a W3C endpoint
if self.capabilities is None:
self.capabilities = response.get('capabilities')
# Double check to see if we have a W3C Compliant browser
self.w3c = response.get('status') is None
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, self._web_element_cls):
return {'ELEMENT': value.id, 'element-6066-11e4-a52e-4f735466cecf': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
def create_web_element(self, element_id):
"""Creates a web element with the specified `element_id`."""
return self._web_element_cls(self, element_id, w3c=self.w3c)
def _unwrap_value(self, value):
if isinstance(value, dict) and ('ELEMENT' in value or 'element-6066-11e4-a52e-4f735466cecf' in value):
wrapped_id = value.get('ELEMENT', None)
if wrapped_id:
return self.create_web_element(value['ELEMENT'])
else:
return self.create_web_element(value['element-6066-11e4-a52e-4f735466cecf'])
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
def execute(self, driver_command, params=None):
"""
Sends a command to be executed by a command.CommandExecutor.
:Args:
- driver_command: The name of the command to execute as a string.
- params: A dictionary of named parameters to send with the command.
:Returns:
The command's JSON response loaded into a dictionary object.
"""
if self.session_id is not None:
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
def get(self, url):
"""
Loads a web page in the current browser session.
"""
self.execute(Command.GET, {'url': url})
@property
def title(self):
"""Returns the title of the current page.
:Usage:
driver.title
"""
resp = self.execute(Command.GET_TITLE)
return resp['value'] if resp['value'] is not None else ""
def find_element_by_id(self, id_):
"""Finds an element by id.
:Args:
- id\_ - The id of the element to be found.
:Usage:
driver.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""
Finds multiple elements by id.
:Args:
- id\_ - The id of the elements to be found.
:Usage:
driver.find_elements_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:Args:
- xpath - The xpath locator of the element to find.
:Usage:
driver.find_element_by_xpath('//div/td[1]')
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Usage:
driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
"""
Finds an element by link text.
:Args:
- link_text: The text of the element to be found.
:Usage:
driver.find_element_by_link_text('Sign In')
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
"""
Finds elements by link text.
:Args:
- link_text: The text of the elements to be found.
:Usage:
driver.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
"""
Finds an element by a partial match of its link text.
:Args:
- link_text: The text of the element to partially match on.
:Usage:
driver.find_element_by_partial_link_text('Sign')
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""
Finds elements by a partial match of their link text.
:Args:
- link_text: The text of the element to partial match on.
:Usage:
driver.find_element_by_partial_link_text('Sign')
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
"""
Finds an element by name.
:Args:
- name: The name of the element to find.
:Usage:
driver.find_element_by_name('foo')
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""
Finds elements by name.
:Args:
- name: The name of the elements to find.
:Usage:
driver.find_elements_by_name('foo')
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
"""
Finds an element by tag name.
:Args:
- name: The tag name of the element to find.
:Usage:
driver.find_element_by_tag_name('foo')
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""
Finds elements by tag name.
:Args:
         - name: The tag name to use when finding elements.
:Usage:
driver.find_elements_by_tag_name('foo')
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
"""
Finds an element by class name.
:Args:
- name: The class name of the element to find.
:Usage:
driver.find_element_by_class_name('foo')
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Usage:
driver.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""
Finds an element by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_element_by_css_selector('#foo')
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""
Finds elements by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_elements_by_css_selector('.foo')
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def execute_script(self, script, *args):
"""
        Synchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_script('document.title')
"""
converted_args = list(args)
command = None
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT
else:
command = Command.EXECUTE_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
def execute_async_script(self, script, *args):
"""
        Asynchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_async_script('document.title')
"""
converted_args = list(args)
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT_ASYNC
else:
command = Command.EXECUTE_ASYNC_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
@property
def current_url(self):
"""
Gets the URL of the current page.
:Usage:
driver.current_url
"""
return self.execute(Command.GET_CURRENT_URL)['value']
@property
def page_source(self):
"""
Gets the source of the current page.
:Usage:
driver.page_source
"""
return self.execute(Command.GET_PAGE_SOURCE)['value']
def close(self):
"""
Closes the current window.
:Usage:
driver.close()
"""
self.execute(Command.CLOSE)
def quit(self):
"""
Quits the driver and closes every associated window.
:Usage:
driver.quit()
"""
try:
self.execute(Command.QUIT)
finally:
self.stop_client()
@property
def current_window_handle(self):
"""
Returns the handle of the current window.
:Usage:
driver.current_window_handle
"""
if self.w3c:
return self.execute(Command.W3C_GET_CURRENT_WINDOW_HANDLE)['value']
else:
return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']
@property
def window_handles(self):
"""
Returns the handles of all windows within the current session.
:Usage:
driver.window_handles
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value']
else:
return self.execute(Command.GET_WINDOW_HANDLES)['value']
def maximize_window(self):
"""
Maximizes the current window that webdriver is using
"""
command = Command.MAXIMIZE_WINDOW
if self.w3c:
command = Command.W3C_MAXIMIZE_WINDOW
self.execute(command, {"windowHandle": "current"})
@property
def switch_to(self):
return self._switch_to
# Target Locators
def switch_to_active_element(self):
""" Deprecated use driver.switch_to.active_element
"""
warnings.warn("use driver.switch_to.active_element instead", DeprecationWarning)
return self._switch_to.active_element
def switch_to_window(self, window_name):
""" Deprecated use driver.switch_to.window
"""
warnings.warn("use driver.switch_to.window instead", DeprecationWarning)
self._switch_to.window(window_name)
def switch_to_frame(self, frame_reference):
""" Deprecated use driver.switch_to.frame
"""
warnings.warn("use driver.switch_to.frame instead", DeprecationWarning)
self._switch_to.frame(frame_reference)
def switch_to_default_content(self):
""" Deprecated use driver.switch_to.default_content
"""
warnings.warn("use driver.switch_to.default_content instead", DeprecationWarning)
self._switch_to.default_content()
def switch_to_alert(self):
""" Deprecated use driver.switch_to.alert
"""
warnings.warn("use driver.switch_to.alert instead", DeprecationWarning)
return self._switch_to.alert
# Navigation
def back(self):
"""
Goes one step backward in the browser history.
:Usage:
driver.back()
"""
self.execute(Command.GO_BACK)
def forward(self):
"""
Goes one step forward in the browser history.
:Usage:
driver.forward()
"""
self.execute(Command.GO_FORWARD)
def refresh(self):
"""
Refreshes the current page.
:Usage:
driver.refresh()
"""
self.execute(Command.REFRESH)
# Options
def get_cookies(self):
"""
Returns a set of dictionaries, corresponding to cookies visible in the current session.
:Usage:
driver.get_cookies()
"""
return self.execute(Command.GET_ALL_COOKIES)['value']
def get_cookie(self, name):
"""
Get a single cookie by name. Returns the cookie if found, None if not.
:Usage:
driver.get_cookie('my_cookie')
"""
cookies = self.get_cookies()
for cookie in cookies:
if cookie['name'] == name:
return cookie
return None
def delete_cookie(self, name):
"""
Deletes a single cookie with the given name.
:Usage:
driver.delete_cookie('my_cookie')
"""
self.execute(Command.DELETE_COOKIE, {'name': name})
def delete_all_cookies(self):
"""
Delete all cookies in the scope of the session.
:Usage:
driver.delete_all_cookies()
"""
self.execute(Command.DELETE_ALL_COOKIES)
def add_cookie(self, cookie_dict):
"""
Adds a cookie to your current session.
:Args:
- cookie_dict: A dictionary object, with required keys - "name" and "value";
optional keys - "path", "domain", "secure", "expiry"
Usage:
driver.add_cookie({'name' : 'foo', 'value' : 'bar'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True})
"""
self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
# Timeouts
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'implicit': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.IMPLICIT_WAIT, {
'ms': float(time_to_wait) * 1000})
def set_script_timeout(self, time_to_wait):
"""
Set the amount of time that the script should wait during an
execute_async_script call before throwing an error.
:Args:
- time_to_wait: The amount of time to wait (in seconds)
:Usage:
driver.set_script_timeout(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'script': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.SET_SCRIPT_TIMEOUT, {
'ms': float(time_to_wait) * 1000})
def set_page_load_timeout(self, time_to_wait):
"""
Set the amount of time to wait for a page load to complete
before throwing an error.
:Args:
- time_to_wait: The amount of time to wait
:Usage:
driver.set_page_load_timeout(30)
"""
try:
self.execute(Command.SET_TIMEOUTS, {
'pageLoad': int(float(time_to_wait) * 1000)})
except WebDriverException:
self.execute(Command.SET_TIMEOUTS, {
'ms': float(time_to_wait) * 1000,
'type': 'page load'})
def find_element(self, by=By.ID, value=None):
"""
'Private' method used by the find_element_by_* methods.
:Usage:
Use the corresponding find_element_by_* instead of this.
:rtype: WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENT, {
'using': by,
'value': value})['value']
def find_elements(self, by=By.ID, value=None):
"""
'Private' method used by the find_elements_by_* methods.
:Usage:
Use the corresponding find_elements_by_* instead of this.
:rtype: list of WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENTS, {
'using': by,
'value': value})['value']
@property
def desired_capabilities(self):
"""
        Returns the driver's current desired capabilities being used
"""
return self.capabilities
def get_screenshot_as_file(self, filename):
"""
Gets the screenshot of the current window. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to.
:Usage:
driver.get_screenshot_as_file('/Screenshots/foo.png')
"""
png = self.get_screenshot_as_png()
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
save_screenshot = get_screenshot_as_file
def get_screenshot_as_png(self):
"""
        Gets the screenshot of the current window as binary data.
:Usage:
driver.get_screenshot_as_png()
"""
return base64.b64decode(self.get_screenshot_as_base64().encode('ascii'))
def get_screenshot_as_base64(self):
"""
Gets the screenshot of the current window as a base64 encoded string
which is useful in embedded images in HTML.
:Usage:
driver.get_screenshot_as_base64()
"""
return self.execute(Command.SCREENSHOT)['value']
def set_window_size(self, width, height, windowHandle='current'):
"""
Sets the width and height of the current window. (window.resizeTo)
:Args:
- width: the width in pixels to set the window to
- height: the height in pixels to set the window to
:Usage:
driver.set_window_size(800,600)
"""
command = Command.SET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_SET_WINDOW_SIZE
self.execute(command, {
'width': int(width),
'height': int(height),
'windowHandle': windowHandle})
def get_window_size(self, windowHandle='current'):
"""
Gets the width and height of the current window.
:Usage:
driver.get_window_size()
"""
command = Command.GET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_GET_WINDOW_SIZE
size = self.execute(command, {'windowHandle': windowHandle})
if size.get('value', None) is not None:
return size['value']
else:
return size
def set_window_position(self, x, y, windowHandle='current'):
"""
Sets the x,y position of the current window. (window.moveTo)
:Args:
- x: the x-coordinate in pixels to set the window position
- y: the y-coordinate in pixels to set the window position
:Usage:
driver.set_window_position(0,0)
"""
if self.w3c:
return self.execute(Command.W3C_SET_WINDOW_POSITION, {
'x': int(x),
'y': int(y)
})
else:
self.execute(Command.SET_WINDOW_POSITION,
{
'x': int(x),
'y': int(y),
'windowHandle': windowHandle
})
def get_window_position(self, windowHandle='current'):
"""
Gets the x,y position of the current window.
:Usage:
driver.get_window_position()
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_POSITION)['value']
else:
return self.execute(Command.GET_WINDOW_POSITION, {
'windowHandle': windowHandle})['value']
def get_window_rect(self):
"""
Gets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.get_window_rect()
"""
return self.execute(Command.GET_WINDOW_RECT)['value']
def set_window_rect(self, x=None, y=None, width=None, height=None):
"""
Sets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.set_window_rect(x=10, y=10)
driver.set_window_rect(width=100, height=200)
driver.set_window_rect(x=10, y=10, width=100, height=200)
"""
if (x is None and y is None) and (height is None and width is None):
raise InvalidArgumentException("x and y or height and width need values")
return self.execute(Command.SET_WINDOW_RECT, {"x": x, "y": y,
"width": width,
"height": height})['value']
@property
def file_detector(self):
return self._file_detector
@file_detector.setter
def file_detector(self, detector):
"""
Set the file detector to be used when sending keyboard input.
By default, this is set to a file detector that does nothing.
see FileDetector
see LocalFileDetector
see UselessFileDetector
:Args:
- detector: The detector to use. Must not be None.
"""
if detector is None:
raise WebDriverException("You may not set a file detector that is null")
if not isinstance(detector, FileDetector):
raise WebDriverException("Detector has to be instance of FileDetector")
self._file_detector = detector
@property
def orientation(self):
"""
Gets the current orientation of the device
:Usage:
orientation = driver.orientation
"""
return self.execute(Command.GET_SCREEN_ORIENTATION)['value']
@orientation.setter
def orientation(self, value):
"""
Sets the current orientation of the device
:Args:
- value: orientation to set it to.
:Usage:
driver.orientation = 'landscape'
"""
allowed_values = ['LANDSCAPE', 'PORTRAIT']
if value.upper() in allowed_values:
self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value})
else:
raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'")
@property
def application_cache(self):
""" Returns a ApplicationCache Object to interact with the browser app cache"""
return ApplicationCache(self)
@property
def log_types(self):
"""
Gets a list of the available log types
:Usage:
driver.log_types
"""
return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value']
def get_log(self, log_type):
"""
Gets the log for a given log type
:Args:
         - log_type: type of log that will be returned
:Usage:
driver.get_log('browser')
driver.get_log('driver')
driver.get_log('client')
driver.get_log('server')
"""
return self.execute(Command.GET_LOG, {'type': log_type})['value']
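
The diff at the top of this row replaces the `save_screenshot = get_screenshot_as_file` alias with a documented wrapper, but the new body passes `self` twice; the review comment gives the one-line fix, `return self.get_screenshot_as_file(filename)`. For contrast, the same delegating-alias pattern sketched in Java, where the receiver is implicit and this bug cannot arise; `Screenshotter` and its methods are illustrative only:

```java
// Illustrative only: a wrapper that exists purely to give an alias its
// own name and documentation, delegating all work to the original.
final class Screenshotter {
    /** Original method: pretend this writes a PNG and reports success. */
    boolean getScreenshotAsFile(String filename) {
        System.out.println("writing " + filename);
        return true;
    }

    /** Alias kept for API compatibility; delegates without re-passing the receiver. */
    boolean saveScreenshot(String filename) {
        return getScreenshotAsFile(filename);
    }

    public static void main(String[] args) {
        new Screenshotter().saveScreenshot("/tmp/foo.png");
    }
}
```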
| 1 | 14,439 | This needs to be just return self.get_screenshot_as_file(filename) | SeleniumHQ-selenium | rb |
@@ -0,0 +1,6 @@
+function isNetworkError (xhr) {
+ if (!xhr) return
+ return (xhr.readyState !== 0 && xhr.readyState !== 4) || xhr.status === 0
+}
+
+module.exports = isNetworkError | 1 | 1 | 12,837 | Shouldn't this return false here? | transloadit-uppy | js |
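
The `if (!xhr) return` guard above makes the function yield `undefined` for a missing request object — a falsy value, but not an actual boolean — which is what the review comment on this row flags. Transposed to Java, where a `boolean` return type forces the choice to be explicit; the `Xhr` stand-in models only the two fields the predicate reads:

```java
// Hypothetical stand-in for the XHR object; only the two fields the
// predicate inspects are modeled.
final class Xhr {
    int readyState;
    int status;
}

final class NetworkErrors {
    // A boolean return type makes the null case explicit: no request
    // object means "not a network error", so return false, not nothing.
    static boolean isNetworkError(Xhr xhr) {
        if (xhr == null) {
            return false;
        }
        return (xhr.readyState != 0 && xhr.readyState != 4) || xhr.status == 0;
    }

    public static void main(String[] args) {
        System.out.println(isNetworkError(null)); // false
        Xhr stalled = new Xhr();
        stalled.readyState = 2;
        stalled.status = 200;
        System.out.println(isNetworkError(stalled)); // true
    }
}
```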
|
@@ -92,6 +92,9 @@ type Config struct {
// Path to a directory containing the container's root filesystem.
Rootfs string `json:"rootfs"`
+ // Umask is the umask to use inside of the container.
+ Umask *uint32 `json:"umask"`
+
// Readonlyfs will remount the container's rootfs as readonly where only externally mounted
 	// bind mounts are writable.
Readonlyfs bool `json:"readonlyfs"` | 1 | package configs
import (
"bytes"
"encoding/json"
"fmt"
"os/exec"
"time"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type Rlimit struct {
Type int `json:"type"`
Hard uint64 `json:"hard"`
Soft uint64 `json:"soft"`
}
// IDMap represents UID/GID Mappings for User Namespaces.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
}
// Seccomp represents syscall restrictions
// By default, only the native architecture of the kernel is allowed to be used
// for syscalls. Additional architectures can be added by specifying them in
// Architectures.
type Seccomp struct {
DefaultAction Action `json:"default_action"`
Architectures []string `json:"architectures"`
Syscalls []*Syscall `json:"syscalls"`
}
// Action is taken upon rule match in Seccomp
type Action int
const (
Kill Action = iota + 1
Errno
Trap
Allow
Trace
Log
)
// Operator is a comparison operator to be used when matching syscall arguments in Seccomp
type Operator int
const (
EqualTo Operator = iota + 1
NotEqualTo
GreaterThan
GreaterThanOrEqualTo
LessThan
LessThanOrEqualTo
MaskEqualTo
)
// Arg is a rule to match a specific syscall argument in Seccomp
type Arg struct {
Index uint `json:"index"`
Value uint64 `json:"value"`
ValueTwo uint64 `json:"value_two"`
Op Operator `json:"op"`
}
// Syscall is a rule to match a syscall in Seccomp
type Syscall struct {
Name string `json:"name"`
Action Action `json:"action"`
ErrnoRet *uint `json:"errnoRet"`
Args []*Arg `json:"args"`
}
// TODO Windows. Many of these fields should be factored out into those parts
// which are common across platforms, and those which are platform specific.
// Config defines configuration options for executing a process inside a contained environment.
type Config struct {
// NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
// This is a common option when the container is running in ramdisk
NoPivotRoot bool `json:"no_pivot_root"`
// ParentDeathSignal specifies the signal that is sent to the container's process in the case
// that the parent process dies.
ParentDeathSignal int `json:"parent_death_signal"`
// Path to a directory containing the container's root filesystem.
Rootfs string `json:"rootfs"`
// Readonlyfs will remount the container's rootfs as readonly where only externally mounted
	// bind mounts are writable.
Readonlyfs bool `json:"readonlyfs"`
// Specifies the mount propagation flags to be applied to /.
RootPropagation int `json:"rootPropagation"`
// Mounts specify additional source and destination paths that will be mounted inside the container's
// rootfs and mount namespace if specified
Mounts []*Mount `json:"mounts"`
// The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
Devices []*Device `json:"devices"`
MountLabel string `json:"mount_label"`
// Hostname optionally sets the container's hostname if provided
Hostname string `json:"hostname"`
// Namespaces specifies the container's namespaces that it should setup when cloning the init process
// If a namespace is not provided that namespace is shared from the container's parent process
Namespaces Namespaces `json:"namespaces"`
// Capabilities specify the capabilities to keep when executing the process inside the container
// All capabilities not specified will be dropped from the processes capability mask
Capabilities *Capabilities `json:"capabilities"`
// Networks specifies the container's network setup to be created
Networks []*Network `json:"networks"`
// Routes can be specified to create entries in the route table as the container is started
Routes []*Route `json:"routes"`
// Cgroups specifies specific cgroup settings for the various subsystems that the container is
// placed into to limit the resources the container has available
Cgroups *Cgroup `json:"cgroups"`
// AppArmorProfile specifies the profile to apply to the process running in the container and is
// changed at the time the process is execed
AppArmorProfile string `json:"apparmor_profile,omitempty"`
// ProcessLabel specifies the label to apply to the process running in the container. It is
// commonly used by selinux
ProcessLabel string `json:"process_label,omitempty"`
// Rlimits specifies the resource limits, such as max open files, to set in the container
// If Rlimits are not set, the container will inherit rlimits from the parent process
Rlimits []Rlimit `json:"rlimits,omitempty"`
// OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores
// for a process. Valid values are in the range [-1000, 1000], where processes with
// higher scores are preferred for being killed. If it is unset then we don't touch the current
// value.
// More information about kernel oom score calculation here: https://lwn.net/Articles/317814/
OomScoreAdj *int `json:"oom_score_adj,omitempty"`
// UidMappings is an array of User ID mappings for User Namespaces
UidMappings []IDMap `json:"uid_mappings"`
// GidMappings is an array of Group ID mappings for User Namespaces
GidMappings []IDMap `json:"gid_mappings"`
// MaskPaths specifies paths within the container's rootfs to mask over with a bind
// mount pointing to /dev/null so as to prevent reads of the file.
MaskPaths []string `json:"mask_paths"`
// ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
// so that writes to these paths are prevented.
ReadonlyPaths []string `json:"readonly_paths"`
// Sysctl is a map of properties and their values. It is the equivalent of using
// sysctl -w my.property.name value in Linux.
Sysctl map[string]string `json:"sysctl"`
// Seccomp allows actions to be taken whenever a syscall is made within the container.
// A number of rules are given, each having an action to be taken if a syscall matches it.
// A default action to be taken if no rules match is also given.
Seccomp *Seccomp `json:"seccomp"`
// NoNewPrivileges controls whether processes in the container can gain additional privileges.
NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
// Hooks are a collection of actions to perform at various container lifecycle events.
// CommandHooks are serialized to JSON, but other hooks are not.
Hooks Hooks
// Version is the version of opencontainer specification that is supported.
Version string `json:"version"`
// Labels are user defined metadata that is stored in the config and populated on the state
Labels []string `json:"labels"`
// NoNewKeyring will not allocate a new session keyring for the container. It will use the
// caller's keyring in this case.
NoNewKeyring bool `json:"no_new_keyring"`
// IntelRdt specifies settings for Intel RDT group that the container is placed into
// to limit the resources (e.g., L3 cache, memory bandwidth) the container has available
IntelRdt *IntelRdt `json:"intel_rdt,omitempty"`
// RootlessEUID is set when the runc was launched with non-zero EUID.
// Note that RootlessEUID is set to false when launched with EUID=0 in userns.
// When RootlessEUID is set, runc creates a new userns for the container.
// (config.json needs to contain userns settings)
RootlessEUID bool `json:"rootless_euid,omitempty"`
// RootlessCgroups is set when the process is unlikely to have full access to cgroups.
// When RootlessCgroups is set, cgroups errors are ignored.
RootlessCgroups bool `json:"rootless_cgroups,omitempty"`
}
type HookName string
type HookList []Hook
type Hooks map[HookName]HookList
const (
// Prestart commands are executed after the container namespaces are created,
// but before the user supplied command is executed from init.
// Note: This hook is now deprecated
// Prestart commands are called in the Runtime namespace.
Prestart HookName = "prestart"
// CreateRuntime commands MUST be called as part of the create operation after
// the runtime environment has been created but before the pivot_root has been executed.
// CreateRuntime is called immediately after the deprecated Prestart hook.
// CreateRuntime commands are called in the Runtime Namespace.
CreateRuntime = "createRuntime"
// CreateContainer commands MUST be called as part of the create operation after
// the runtime environment has been created but before the pivot_root has been executed.
// CreateContainer commands are called in the Container namespace.
CreateContainer = "createContainer"
// StartContainer commands MUST be called as part of the start operation and before
// the container process is started.
// StartContainer commands are called in the Container namespace.
StartContainer = "startContainer"
// Poststart commands are executed after the container init process starts.
// Poststart commands are called in the Runtime Namespace.
Poststart = "poststart"
// Poststop commands are executed after the container init process exits.
// Poststop commands are called in the Runtime Namespace.
Poststop = "poststop"
)
type Capabilities struct {
// Bounding is the set of capabilities checked by the kernel.
Bounding []string
// Effective is the set of capabilities checked by the kernel.
Effective []string
// Inheritable is the capabilities preserved across execve.
Inheritable []string
// Permitted is the limiting superset for effective capabilities.
Permitted []string
// Ambient is the ambient set of capabilities that are kept.
Ambient []string
}
// RunHooks executes each hook in order, wrapping any failure with the hook's index.
func (hooks HookList) RunHooks(state *specs.State) error {
for i, h := range hooks {
if err := h.Run(state); err != nil {
return errors.Wrapf(err, "Running hook #%d:", i)
}
}
return nil
}
// UnmarshalJSON decodes hooks from their serialized CommandHook form, skipping empty lists.
func (hooks *Hooks) UnmarshalJSON(b []byte) error {
var state map[HookName][]CommandHook
if err := json.Unmarshal(b, &state); err != nil {
return err
}
*hooks = Hooks{}
for n, commandHooks := range state {
if len(commandHooks) == 0 {
continue
}
(*hooks)[n] = HookList{}
for _, h := range commandHooks {
(*hooks)[n] = append((*hooks)[n], h)
}
}
return nil
}
// MarshalJSON serializes only CommandHooks; other hook types are skipped with a warning.
func (hooks *Hooks) MarshalJSON() ([]byte, error) {
serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
for _, hook := range hooks {
switch chook := hook.(type) {
case CommandHook:
serializableHooks = append(serializableHooks, chook)
default:
logrus.Warnf("cannot serialize hook of type %T, skipping", hook)
}
}
return serializableHooks
}
return json.Marshal(map[string]interface{}{
"prestart": serialize((*hooks)[Prestart]),
"createRuntime": serialize((*hooks)[CreateRuntime]),
"createContainer": serialize((*hooks)[CreateContainer]),
"startContainer": serialize((*hooks)[StartContainer]),
"poststart": serialize((*hooks)[Poststart]),
"poststop": serialize((*hooks)[Poststop]),
})
}
type Hook interface {
// Run executes the hook with the provided state.
Run(*specs.State) error
}
// NewFunctionHook will call the provided function when the hook is run.
func NewFunctionHook(f func(*specs.State) error) FuncHook {
return FuncHook{
run: f,
}
}
type FuncHook struct {
run func(*specs.State) error
}
func (f FuncHook) Run(s *specs.State) error {
return f.run(s)
}
type Command struct {
Path string `json:"path"`
Args []string `json:"args"`
Env []string `json:"env"`
Dir string `json:"dir"`
Timeout *time.Duration `json:"timeout"`
}
// NewCommandHook will execute the provided command when the hook is run.
func NewCommandHook(cmd Command) CommandHook {
return CommandHook{
Command: cmd,
}
}
type CommandHook struct {
Command
}
// Run marshals the container state to JSON, feeds it to the hook command on stdin,
// and enforces the optional timeout.
func (c Command) Run(s *specs.State) error {
b, err := json.Marshal(s)
if err != nil {
return err
}
var stdout, stderr bytes.Buffer
cmd := exec.Cmd{
Path: c.Path,
Args: c.Args,
Env: c.Env,
Stdin: bytes.NewReader(b),
Stdout: &stdout,
Stderr: &stderr,
}
if err := cmd.Start(); err != nil {
return err
}
errC := make(chan error, 1)
go func() {
err := cmd.Wait()
if err != nil {
err = fmt.Errorf("error running hook: %v, stdout: %s, stderr: %s", err, stdout.String(), stderr.String())
}
errC <- err
}()
var timerCh <-chan time.Time
if c.Timeout != nil {
timer := time.NewTimer(*c.Timeout)
defer timer.Stop()
timerCh = timer.C
}
select {
case err := <-errC:
return err
case <-timerCh:
cmd.Process.Kill()
cmd.Wait()
return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds())
}
}
| 1 | 20,825 | Should have json tag | opencontainers-runc | go |
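The review note above is terse; here is a minimal Go sketch of what it asks for, assuming the untagged field it targets is `Hooks` (both that assumption and the `hooks` key name are illustrative, not taken from the review):

package configs

// Hooks is a hypothetical stand-in for the real type, present only so
// the sketch compiles on its own.
type Hooks map[string][]string

// ConfigSketch shows the suggested fix: an explicit json tag makes the
// field marshal under a stable key instead of the default Go field
// name "Hooks".
type ConfigSketch struct {
	Hooks Hooks `json:"hooks"`
}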
@@ -180,7 +180,7 @@ static const char *const PORT_ERROR[PORT_ERROR_COUNT] = {
/*
* errors command configuration, set during parse_args()
*/
-static struct errors_config {
+STATIC struct errors_config {
bool clear;
int force_count;
enum verbs_index which; | 1 | // Copyright(c) 2018, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/*
* @file errors.c
*
* @brief fpga error reporting
*
*/
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "fpgainfo.h"
#include "safe_string/safe_string.h"
#include <opae/properties.h>
#include "errors.h"
#define FPGA_BIT_IS_SET(val, index) (((val) >> (index)) & 1)
const char *supported_verbs[] = {"all", "fme", "port"};
enum verbs_index { VERB_ALL = 0, VERB_FME, VERB_PORT, VERB_MAX };
#define FME_ERROR_COUNT 7
static const char *const FME_ERROR[FME_ERROR_COUNT] = {
"Fabric error detected",
"Fabric fifo under / overflow error detected",
"KTI CDC Parity Error detected",
"KTI CDC Parity Error detected",
"IOMMU Parity error detected",
"AFU PF/VF access mismatch detected",
"Indicates an MBP event error detected"};
#define PCIE0_ERROR_COUNT 10
static const char *const PCIE0_ERROR[PCIE0_ERROR_COUNT] = {
"TLP format/type error detected", "TTLP MW address error detected",
"TLP MW length error detected", "TLP MR address error detected",
"TLP MR length error detected", "TLP CPL tag error detected",
"TLP CPL status error detected", "TLP CPL timeout error detected",
"CCI bridge parity error detected", "TLP with EP error detected"};
#define PCIE1_ERROR_COUNT 10
static const char *const PCIE1_ERROR[PCIE1_ERROR_COUNT] = {
"TLP format/type error detected", "TTLP MW address error detected",
"TLP MW length error detected", "TLP MR address error detected",
"TLP MR length error detected", "TLP CPL tag error detected",
"TLP CPL status error detected", "TLP CPL timeout error detected",
"CCI bridge parity error detected", "TLP with EP error detected"};
#define NONFATAL_ERROR_COUNT 13
static const char *const NONFATAL_ERROR[NONFATAL_ERROR_COUNT] = {
"Temperature threshold triggered AP1 detected",
"Temperature threshold triggered AP2 detected",
"PCIe error detected",
"AFU port Fatal error detected",
"ProcHot event error detected",
"AFU PF/VF access mismatch error detected",
"Injected Warning Error detected",
"Reserved",
"Reserved",
"Temperature threshold triggered AP6 detected",
"Power threshold triggered AP1 error detected",
"Power threshold triggered AP2 error detected",
"MBP event error detected"};
#define CATFATAL_ERROR_COUNT 12
static const char *const CATFATAL_ERROR[CATFATAL_ERROR_COUNT] = {
"KTI link layer error detected.",
"tag-n-cache error detected.",
"CCI error detected.",
"KTI protocol error detected.",
"Fatal DRAM error detected",
"IOMMU fatal parity error detected.",
"Fabric fatal error detected",
"Poison error from any of PCIe ports detected",
"Injected Fatal Error detected",
"Catastrophic CRC error detected",
"Catastrophic thermal runaway event detected",
"Injected Catastrophic Error detected"};
#define INJECT_ERROR_COUNT 3
static const char *const INJECT_ERROR[INJECT_ERROR_COUNT] = {
"Set Catastrophic error .", "Set Fatal error.",
"Ser Non-fatal error ."};
#define PORT_ERROR_COUNT 60
static const char *const PORT_ERROR[PORT_ERROR_COUNT] = {
// 0
"Tx Channel 0 overflow error detected.",
"Tx Channel 0 invalid request encoding error detected.",
"Tx Channel 0 cl_len=3 not supported error detected.",
"Tx Channel 0 request with cl_len=2 does NOT have a 2CL aligned address error detected.",
"Tx Channel 0 request with cl_len=4 does NOT have a 4CL aligned address error detected.",
"RSVD.",
"RSVD.",
"RSVD.",
"RSVD.",
"AFU MMIO RD received while PORT is in reset error detected",
// 10
"AFU MMIO WR received while PORT is in reset error detected",
"RSVD.",
"RSVD.",
"RSVD.",
"RSVD.",
"RSVD.",
"Tx Channel 1 invalid request encoding error detected",
"Tx Channel 1 cl_len=3 not supported error detected.",
"Tx Channel 1 request with cl_len=2 does NOT have a 2CL aligned address error detected",
"Tx Channel 1 request with cl_len=4 does NOT have a 4CL aligned address error detected",
// 20
"Tx Channel 1 insufficient data payload Error detected",
"Tx Channel 1 data payload overrun error detected",
"Tx Channel 1 incorrect address on subsequent payloads error detected",
"Tx Channel 1 Non-zero SOP detected for requests!=WrLine_* error detected",
"Tx Channel 1 SOP expected to be 0 for req_type!=WrLine_*",
"Tx Channel 1 Illegal VC_SEL. Atomic request is only supported on VL0 error detected",
"RSVD.",
"RSVD.",
"RSVD.",
"RSVD.",
// 30
"RSVD.",
"RSVD.",
"MMIO TimedOut error detected",
"Tx Channel 2 fifo overflo error detected",
"MMIO Read response received, with no matching request pending error detected",
"RSVD.",
"RSVD.",
"RSVD.",
"RSVD.",
"RSVD.",
// 40
"Number of pending requests: counter overflow error detected",
"Request with Address violating SMM range error detected",
"Request with Address violating second SMM range error detected",
"Request with Address violating ME stolen range",
"Request with Address violating Generic protected range error detected ",
"Request with Address violating Legacy Range Low error detected",
"Request with Address violating Legacy Range High error detected",
"Request with Address violating VGA memory range error detected",
"Page Fault error detected",
"PMR Erro error detected",
// 50
"AP6 event detected",
"VF FLR detected on port when PORT configured in PF access mode error detected ",
"RSVD.",
"RSVD.",
"RSVD.",
"RSVD.",
"Tx Channel 1 byte_len cannot be zero",
"Tx Channel 1 illegal operation: sum of byte_len and byte_start should be less than or equal to 64",
"Tx Channel 1 illegal operation: cl_len cannot be non-zero when mode is eMOD_BYTE",
"Tx Channel 1 byte_len and byte_start should be zero when mode is not eMOD_BYTE"
};
/*
* errors command configuration, set during parse_args()
*/
static struct errors_config {
bool clear;
int force_count;
enum verbs_index which;
int help_only;
} errors_config = {.clear = false, .which = VERB_ALL, .help_only = false};
/*
* Print help
*/
void errors_help(void)
{
unsigned int i;
printf("\nPrint and clear errors\n"
" fpgainfo errors [-h] [-c] {");
printf("%s", supported_verbs[0]);
for (i = 1; i < sizeof(supported_verbs) / sizeof(supported_verbs[0]);
i++) {
printf(",%s", supported_verbs[i]);
}
printf("}\n\n"
" -h,--help Print this help\n"
" -c,--clear Clear all errors\n"
" --force Retry clearing errors 64 times\n"
" to clear certain error conditions\n"
"\n");
errors_config.help_only = true;
}
#define ERRORS_GETOPT_STRING ":chf"
int parse_error_args(int argc, char *argv[])
{
optind = 0;
struct option longopts[] = {
{"clear", no_argument, NULL, 'c'},
{"force", no_argument, NULL, 'f'},
{"help", no_argument, NULL, 'h'},
{0, 0, 0, 0},
};
int getopt_ret;
int option_index;
errors_config.force_count = 1;
while (-1
!= (getopt_ret = getopt_long(argc, argv, ERRORS_GETOPT_STRING,
longopts, &option_index))) {
const char *tmp_optarg = optarg;
if ((optarg) && ('=' == *tmp_optarg)) {
++tmp_optarg;
}
switch (getopt_ret) {
case 'c': /* clear */
errors_config.clear = true;
break;
case 'f': /* Force */
errors_config.clear = true;
errors_config.force_count = 64;
break;
case 'h': /* help */
errors_help();
return -1;
case ':': /* missing option argument */
OPAE_ERR("Missing option argument\n");
errors_help();
return -1;
case '?':
default: /* invalid option */
OPAE_ERR("Invalid cmdline options\n");
errors_help();
return -1;
}
}
// The word after 'errors' should be what to operate on ("all", "fme",
// or "port")
optind++;
if (argc < optind + 1) {
OPAE_ERR("Not enough parameters\n");
errors_help();
return -1;
}
int cmp = 0;
if ((optind < argc) &&
strcmp_s(argv[optind - 1], RSIZE_MAX_STR, "errors", &cmp) == EOK &&
cmp == 0) {
char *verb = argv[optind];
size_t idx = str_in_list(verb, supported_verbs, VERB_MAX);
if (idx < VERB_MAX) {
errors_config.which = idx;
} else {
OPAE_ERR("Not a valid errors resource spec: %s\n", verb);
errors_help();
return -1;
}
} else {
OPAE_ERR("Not a valid errors resource spec: %s\n",
argv[optind - 1]);
errors_help();
return -1;
}
return 0;
}
fpga_result errors_filter(fpga_properties *filter, int argc, char *argv[])
{
fpga_result res = FPGA_OK;
if (0 == parse_error_args(argc, argv)) {
switch (errors_config.which) {
case VERB_FME:
res = fpgaPropertiesSetObjectType(*filter, FPGA_DEVICE);
ON_FPGAINFO_ERR_GOTO(res, out,
"setting type to FPGA_DEVICE");
break;
case VERB_PORT:
res = fpgaPropertiesSetObjectType(*filter,
FPGA_ACCELERATOR);
ON_FPGAINFO_ERR_GOTO(
res, out, "setting type to FPGA_ACCELERATOR");
break;
case VERB_ALL:
default:
break;
}
}
out:
return res;
}
/*
 * Print the error registers for one FME or PORT resource, optionally
 * clearing them first when --clear was requested
 */
static void print_errors_info(fpga_token token, fpga_properties props,
struct fpga_error_info *errinfos,
uint32_t num_errors)
{
int i;
int j;
fpga_result res = FPGA_OK;
fpga_objtype objtype;
const char *const *error_string = NULL;
int size = 0;
if ((NULL == errinfos) || (0 == num_errors)) {
return;
}
if (errors_config.clear) {
for (i = 0; i < errors_config.force_count; i++) {
fpgaClearAllErrors(token);
}
}
res = fpgaPropertiesGetObjectType(props, &objtype);
fpgainfo_print_err("reading objtype from properties", res);
if (((VERB_ALL == errors_config.which)
|| (VERB_FME == errors_config.which))
&& (FPGA_DEVICE == objtype)) {
fpgainfo_print_common("//****** FME ERRORS ******//", props);
for (i = 0; i < (int)num_errors; i++) {
uint64_t error_value = 0;
res = fpgaReadError(token, i, &error_value);
fpgainfo_print_err("reading error for FME", res);
printf("%-32s : 0x%" PRIX64 "\n", errinfos[i].name,
error_value);
int cmp = 0;
if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"Errors", &cmp) == EOK && cmp == 0) {
size = FME_ERROR_COUNT;
error_string = FME_ERROR;
} else if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"Next Error", &cmp) == EOK && cmp == 0) {
size = 0;
error_string = NULL;
} else if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"First Error", &cmp) == EOK && cmp == 0) {
size = 0;
error_string = NULL;
} else if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"PCIe0 Errors", &cmp) == EOK && cmp == 0) {
size = PCIE0_ERROR_COUNT;
error_string = PCIE0_ERROR;
} else if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"Inject Error", &cmp) == EOK && cmp == 0) {
size = INJECT_ERROR_COUNT;
error_string = INJECT_ERROR;
} else if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"Catfatal Errors", &cmp) == EOK && cmp == 0) {
size = CATFATAL_ERROR_COUNT;
error_string = CATFATAL_ERROR;
} else if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"Nonfatal Errors", &cmp) == EOK && cmp == 0) {
size = NONFATAL_ERROR_COUNT;
error_string = NONFATAL_ERROR;
} else if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"PCIe1 Errors", &cmp) == EOK && cmp == 0) {
size = PCIE1_ERROR_COUNT;
error_string = PCIE1_ERROR;
}
for (j = 0; (j < size) && (NULL != error_string); j++) {
if (FPGA_BIT_IS_SET(error_value, j)) {
printf("\t %s \n", error_string[j]);
}
}
}
} else if (((VERB_ALL == errors_config.which)
|| (VERB_PORT == errors_config.which))
&& (FPGA_ACCELERATOR == objtype)) {
fpgainfo_print_common("//****** PORT ERRORS ******//", props);
for (i = 0; i < (int)num_errors; i++) {
uint64_t error_value = 0;
res = fpgaReadError(token, i, &error_value);
fpgainfo_print_err("reading error for PORT", res);
printf("%-32s : 0x%" PRIX64 "\n", errinfos[i].name,
error_value);
int cmp = 0;
if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"Errors", &cmp) == EOK && cmp == 0) {
size = PORT_ERROR_COUNT;
error_string = PORT_ERROR;
} else if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"First Malformed Req", &cmp) == EOK && cmp == 0) {
size = 0;
error_string = NULL;
} else if (strcmp_s(errinfos[i].name, RSIZE_MAX_STR,
"First Error", &cmp) == EOK && cmp == 0) {
size = 0;
error_string = NULL;
}
for (j = 0; (j < size) && (NULL != error_string); j++) {
if (FPGA_BIT_IS_SET(error_value, j)) {
printf("\t %s \n", error_string[j]);
}
}
}
}
}
fpga_result errors_command(fpga_token *tokens, int num_tokens, int argc,
char *argv[])
{
(void)argc;
(void)argv;
fpga_result res = FPGA_OK;
fpga_properties props;
struct fpga_error_info *errinfos = NULL;
if (errors_config.help_only) {
return res;
}
int i = 0;
for (i = 0; i < num_tokens; ++i) {
uint32_t num_errors;
res = fpgaGetProperties(tokens[i], &props);
if (res == FPGA_OK) {
res = fpgaPropertiesGetNumErrors(props, &num_errors);
fpgainfo_print_err("reading errors from properties", res);
if (num_errors != 0) {
int j;
errinfos = (struct fpga_error_info *)calloc(
num_errors, sizeof(*errinfos));
if (!errinfos) {
res = FPGA_NO_MEMORY;
OPAE_ERR("Error allocating memory");
goto destroy_and_free;
}
for (j = 0; j < (int)num_errors; j++) {
res = fpgaGetErrorInfo(tokens[i], j,
&errinfos[j]);
fpgainfo_print_err(
"reading error info structure", res);
replace_chars(errinfos[j].name, '_', ' ');
upcase_pci(errinfos[j].name,
strnlen_s(errinfos[j].name, RSIZE_MAX_STR));
upcase_first(errinfos[j].name);
}
}
print_errors_info(tokens[i], props, errinfos, num_errors);
destroy_and_free:
free(errinfos);
errinfos = NULL;
fpgaDestroyProperties(&props);
if (res == FPGA_NO_MEMORY) {
break;
}
} else {
fpgainfo_print_err("reading properties from token", res);
}
}
return res;
}
| 1 | 19,096 | Is this needed after all? Looks like the struct was re-defined inside the test file. | OPAE-opae-sdk | c |
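For context on the `static` -> `STATIC` change in the diff above, a sketch of the conventional test-visibility macro such a change usually relies on (whether this project defines it exactly this way, and the `UNIT_TEST` guard name, are assumptions):

/*
 * Typical STATIC macro: unit-test builds drop the static qualifier so
 * the test binary can link against internals; normal builds keep
 * internal linkage.
 */
#ifdef UNIT_TEST
#define STATIC
#else
#define STATIC static
#endif

STATIC int internal_counter; /* externally visible only when UNIT_TEST is defined */

Read this way, the reviewer's question is whether exposing the struct is still needed once the test re-defines it locally.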
@@ -67,4 +67,11 @@ public interface ActionsProvider {
default ExpireSnapshots expireSnapshots(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement expireSnapshots");
}
+
+ /**
+ * Instantiates an action to remove all the files referenced by a given metadata location.
+ */
+ default RemoveReachableFiles removeFiles(String metadataLocation) {
+ throw new UnsupportedOperationException(this.getClass().getName() + " does not implement removeFiles");
+ }
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.actions;
import org.apache.iceberg.Table;
/**
* An API that should be implemented by query engine integrations for providing actions.
*/
public interface ActionsProvider {
/**
* Instantiates an action to snapshot an existing table as a new Iceberg table.
*/
default SnapshotTable snapshotTable(String sourceTableIdent) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement snapshotTable");
}
/**
* Instantiates an action to migrate an existing table to Iceberg.
*/
default MigrateTable migrateTable(String tableIdent) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement migrateTable");
}
/**
* Instantiates an action to remove orphan files.
*/
default RemoveOrphanFiles removeOrphanFiles(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement removeOrphanFiles");
}
/**
* Instantiates an action to rewrite manifests.
*/
default RewriteManifests rewriteManifests(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement rewriteManifests");
}
/**
* Instantiates an action to rewrite data files.
*/
default RewriteDataFiles rewriteDataFiles(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement rewriteDataFiles");
}
/**
* Instantiates an action to expire snapshots.
*/
default ExpireSnapshots expireSnapshots(Table table) {
throw new UnsupportedOperationException(this.getClass().getName() + " does not implement expireSnapshots");
}
}
| 1 | 35,835 | I think the name of the method should match the name of the action: `removeReachableFiles`. | apache-iceberg | java |
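A Java sketch of the rename the reviewer suggests, so the method name mirrors the `RemoveReachableFiles` action; the empty stand-in interface exists only to keep the sketch self-contained:

// Hypothetical stand-in for the real action interface.
interface RemoveReachableFiles {}

interface ActionsProviderSketch {
  /**
   * Renamed from removeFiles to removeReachableFiles, per the review.
   */
  default RemoveReachableFiles removeReachableFiles(String metadataLocation) {
    throw new UnsupportedOperationException(
        this.getClass().getName() + " does not implement removeReachableFiles");
  }
}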
@@ -106,6 +106,7 @@ echo "
<ul>
<li><a href=\"manage_apps.php\">Manage applications</a></li>
<li><a href=\"manage_app_versions.php\">Manage application versions</a></li>
+ <li><a href=\"manage_consent_types.php\">Manage Consent types</a></li>
<li> Manage jobs
<ul>
<li><a href=\"cancel_wu_form.php\">Cancel jobs by ID</a> | 1 | <?php
// This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2014 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
require_once("../inc/db_ops.inc");
require_once("../inc/util_ops.inc");
require_once("../inc/uotd.inc");
require_once("../project/project.inc");
$config = get_config();
$stripchart_cgi_url = parse_config($config, "<stripchart_cgi_url>");
db_init();
$title = "Project Management";
admin_page_head($title);
// Notification area
echo "<ul>\n";
if (!file_exists(".htaccess")) {
echo "<li><span style=\"color: #ff0000\">
The Project Management directory is not
protected from public access by a .htaccess file.
</span></li>
";
}
if (!defined("SYS_ADMIN_EMAIL")) {
echo "<li><span style=\"color: #ff0000\">
The defined constant SYS_ADMIN_EMAIL
has not been set. Please edit <tt>project/project.inc</tt> and set this
to an address which can be used to contact the project administrators.
</span></li>
";
}
if (parse_bool($config, "disable_account_creation")) {
echo "<li><span style=\"color: #ff9900\">
Account creation is disabled.</span></li>
";
}
if (defined("INVITE_CODES")) {
echo "<li><span style=\"color: #ff9900\">
Account creation is restricted by the use of
invitation codes.</span></li>
";
}
$uotd_candidates = count_uotd_candidates();
if ($uotd_candidates >= 0) {
if ($uotd_candidates >= UOTD_THRESHOLD*2) {
$color = "#00aa00";
} elseif ($uotd_candidates < UOTD_THRESHOLD) {
$color = "#ff0000";
} else {
$color = "#ff9900";
}
echo "<li><span style=\"color: ".$color."\">
There are ".$uotd_candidates." remaining candidates for User of the Day.
</span></li>
";
}
echo "</ul>\n";
if (function_exists('admin_index_extra')) {
admin_index_extra();
}
echo "
<p>
<table border=\"0\"><tr valign=\"top\">
<td><b>Browse database:</b>
<ul>
<li><a href=\"db_form.php?table=result&detail=low\">Results</a></li>
<li><a href=\"db_form.php?table=workunit\">Workunits</a></li>
<li><a href=\"db_form.php?table=host&detail=low\">Hosts</a></li>
<li><a href=\"db_form.php?table=user\">Users</a> (<a href=\"list_new_users.php\">recently registered</a>)</li>
<li><a href=\"db_form.php?table=team\">Teams</a></li>
<li><a href=\"db_action.php?table=app\">Applications</a></li>
<li><a href=\"db_form.php?table=app_version\">Application versions</a></li>
<li><a href=\"db_action.php?table=platform\">Platforms</a></li>
<li><a href=dbinfo.php>DB row counts and disk usage</a>
<li><a href=\"show_log.php?f=mysql*.log&l=-20\">Tail MySQL logs</a>
</ul>
</td>
<td><b>Computing</b>
<ul>
<li><a href=\"manage_apps.php\">Manage applications</a></li>
<li><a href=\"manage_app_versions.php\">Manage application versions</a></li>
<li> Manage jobs
<ul>
<li><a href=\"cancel_wu_form.php\">Cancel jobs by ID</a>
<li><a href=\"cancel_workunits.php\">Cancel jobs by SQL clause</a>
<li><a href=transition_all.php>Transition jobs</a>
<p class=\"text-muted\">(this can 'unstick' old jobs)</p>
<li><a href=\"revalidate.php\">Re-validate jobs</a>
<li><a href=assign.php>Assigned jobs</a>
</ul>
<li><a href=\"job_times.php\">FLOP count statistics</a>
<li><a href=\"$stripchart_cgi_url/stripchart.cgi\">Stripcharts</a>
<li><a href=\"show_log.php\">Show/Grep logs</a>
<li>
<form method=\"get\" action=\"clear_host.php\">
<input class=\"btn btn-default\" type=\"submit\" value=\"Clear RPC seqno\">
host ID:
<input type=\"text\" size=\"5\" name=\"hostid\">
</form>
</ul>
</td>
<td><b>User management</b>
<ul>
<li><a href=".url_base()."/forum_index.php>Post news item</a></li>
<li><a href=\"profile_screen_form.php\">Screen user profiles </a></li>
<li><a href=\"badge_admin.php\">Badges</a></li>
<li><a href=\"manage_special_users.php\">User privileges</a></li>
<li><a href=".url_base()."/manage_project.php>User job submission privileges</a></li>
<li><a href=\"mass_email.php\">Send mass email to a selected set of users</a></li>
<li><form action=\"manage_user.php\">
<input class=\"btn btn-default\" type=\"submit\" value=\"Manage user\">
ID: <input name=\"userid\">
</form>
</li>
</ul>
</td>
</tr>
</table>
";
// Result Summaries:
$show_deprecated = get_str("show_deprecated", true);
$show_only = array("all"); // Add all appids you want to display, or "all"
$apps = BoincApp::enum("");
foreach ($apps as $app) {
if (in_array($app->id, $show_only)
|| ( in_array("all", $show_only)
&& (!$app->deprecated || $show_deprecated)
)) {
echo "
<b>Results for <tt>$app->name</tt>:</b>
<ul>
";
for ($i=0; $i<2; $i++) {
if ($i) {
$secs = 7*86400;
$period = " 7 days";
} else {
$secs = 86400;
$period = "24 hours";
}
echo "
<li> Past $period:
<a href=\"result_summary.php?appid=$app->id&nsecs=$secs\">
summary
</a> |
<a href=\"pass_percentage_by_platform.php?appid=$app->id&nsecs=$secs\">
summary per app version
</a> |
<a href=\"failure_result_summary_by_host.php?appid=$app->id&nsecs=$secs\">
failures broken down by (app version, host)
</a> |
<a href=\"failure_result_summary_by_platform.php?appid=$app->id&nsecs=$secs\">
failures broken down by (app version, error)
</a>
";
}
echo " </ul> ";
}
}
if ($show_deprecated) {
echo "<a href=\"index.php?show_deprecated=0\">Hide deprecated applications</a>";
} else {
echo "<a href=\"index.php?show_deprecated=1\">Show deprecated applications</a>";
}
echo "<h3>Periodic tasks</h3>
The following scripts should be run as periodic tasks, not via this web page
(see <a href=\"https://boinc.berkeley.edu/trac/wiki/ProjectTasks\">https://boinc.berkeley.edu/trac/wiki/ProjectTasks</a>):
<pre>
update_forum_activities.php, update_profile_pages.php, update_uotd.php, delete_expired_tokens.php
</pre>
<h3>Repair tasks</h3>
The following scripts do one-time repair operations.
Run them manually on the command line as needed
(i.e. <tt>php scriptname.php</tt>):
<pre>forum_repair.php, team_repair.php, repair_validator_problem.php</pre>
<h3>Cleanup tasks</h3>
You can run the following as a periodic task, on the command line,
or by clicking here:
<ul>
<li> <a href=remove_zombie_hosts.php>remove_zombie_hosts.php</a> Remove zombie host records
</ul>
";
admin_page_tail();
$cvs_version_tracker[]="\$Id$"; //Generated automatically - do not edit
?>
| 1 | 10,935 | can you make the C in Consent lower case to match the other ones in this list? | BOINC-boinc | php |
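The fix the reviewer asks for is purely cosmetic; a sketch of the corrected line, echoed in the same style the page uses:

<?php
// Sketch: "Consent" lowercased so the label matches the casing of the
// neighboring menu entries.
echo "
    <li><a href=\"manage_consent_types.php\">Manage consent types</a></li>
";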
@@ -16,6 +16,7 @@ import (
"sync"
"testing"
+ "github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action" | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"encoding/hex"
"fmt"
"io/ioutil"
"math/big"
"os"
"sync"
"testing"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
ta "github.com/iotexproject/iotex-core/test/testaddress"
"github.com/iotexproject/iotex-core/testutil"
)
// addTestingTsfBlocks mints, validates, and commits five blocks of test transfers.
func addTestingTsfBlocks(bc Blockchain) error {
// Add block 0
tsf0, _ := action.NewTransfer(
1,
big.NewInt(90000000),
ta.Addrinfo["producer"].String(),
[]byte{}, uint64(100000),
big.NewInt(10),
)
bd := &action.EnvelopeBuilder{}
elp := bd.SetAction(tsf0).
SetNonce(1).
SetGasLimit(100000).
SetGasPrice(big.NewInt(10)).Build()
selp, err := action.Sign(elp, identityset.PrivateKey(0))
if err != nil {
return err
}
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[identityset.Address(0).String()] = []action.SealedEnvelope{selp}
blk, err := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
addr0 := ta.Addrinfo["producer"].String()
priKey0 := ta.Keyinfo["producer"].PriKey
addr1 := ta.Addrinfo["alfa"].String()
priKey1 := ta.Keyinfo["alfa"].PriKey
addr2 := ta.Addrinfo["bravo"].String()
addr3 := ta.Addrinfo["charlie"].String()
priKey3 := ta.Keyinfo["charlie"].PriKey
addr4 := ta.Addrinfo["delta"].String()
priKey4 := ta.Keyinfo["delta"].PriKey
addr5 := ta.Addrinfo["echo"].String()
priKey5 := ta.Keyinfo["echo"].PriKey
addr6 := ta.Addrinfo["foxtrot"].String()
// Add block 1
// test --> A, B, C, D, E, F
tsf1, err := testutil.SignedTransfer(addr1, priKey0, 1, big.NewInt(20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf2, err := testutil.SignedTransfer(addr2, priKey0, 2, big.NewInt(30), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf3, err := testutil.SignedTransfer(addr3, priKey0, 3, big.NewInt(50), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf4, err := testutil.SignedTransfer(addr4, priKey0, 4, big.NewInt(70), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf5, err := testutil.SignedTransfer(addr5, priKey0, 5, big.NewInt(110), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf6, err := testutil.SignedTransfer(addr6, priKey0, 6, big.NewInt(50<<20), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
accMap := make(map[string][]action.SealedEnvelope)
accMap[addr0] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5, tsf6}
blk, err = bc.MintNewBlock(
accMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 2
// Charlie --> A, B, D, E, test
tsf1, err = testutil.SignedTransfer(addr1, priKey3, 1, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf2, err = testutil.SignedTransfer(addr2, priKey3, 2, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf3, err = testutil.SignedTransfer(addr4, priKey3, 3, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf4, err = testutil.SignedTransfer(addr5, priKey3, 4, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf5, err = testutil.SignedTransfer(addr0, priKey3, 5, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
accMap = make(map[string][]action.SealedEnvelope)
accMap[addr3] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5}
blk, err = bc.MintNewBlock(
accMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 3
// Delta --> B, E, F, test
tsf1, err = testutil.SignedTransfer(addr2, priKey4, 1, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf2, err = testutil.SignedTransfer(addr5, priKey4, 2, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf3, err = testutil.SignedTransfer(addr6, priKey4, 3, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf4, err = testutil.SignedTransfer(addr0, priKey4, 4, big.NewInt(1), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
accMap = make(map[string][]action.SealedEnvelope)
accMap[addr4] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4}
blk, err = bc.MintNewBlock(
accMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if err := bc.CommitBlock(blk); err != nil {
return err
}
// Add block 4
// Echo --> A, B, C, D, F, test
tsf1, err = testutil.SignedTransfer(addr1, priKey5, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf2, err = testutil.SignedTransfer(addr2, priKey5, 2, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf3, err = testutil.SignedTransfer(addr3, priKey5, 3, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf4, err = testutil.SignedTransfer(addr4, priKey5, 4, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf5, err = testutil.SignedTransfer(addr6, priKey5, 5, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf6, err = testutil.SignedTransfer(addr0, priKey5, 6, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf7, err := testutil.SignedTransfer(addr3, priKey3, 6, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
tsf8, err := testutil.SignedTransfer(addr1, priKey1, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
if err != nil {
return err
}
accMap = make(map[string][]action.SealedEnvelope)
accMap[addr5] = []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4, tsf5, tsf6}
accMap[addr3] = []action.SealedEnvelope{tsf7}
accMap[addr1] = []action.SealedEnvelope{tsf8}
blk, err = bc.MintNewBlock(
accMap,
testutil.TimestampNow(),
)
if err != nil {
return err
}
if err := bc.ValidateBlock(blk); err != nil {
return err
}
if blk.TxRoot() != blk.CalculateTxRoot() {
	// err is nil at this point; return an explicit error on tx root mismatch
	return fmt.Errorf("tx root hash does not match the calculated tx root")
}
return bc.CommitBlock(blk)
}
func TestCreateBlockchain(t *testing.T) {
require := require.New(t)
ctx := context.Background()
cfg := config.Default
// disable account-based testing
cfg.Chain.TrieDBPath = ""
// create chain
registry := protocol.Registry{}
acc := account.NewProtocol()
require.NoError(registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(registry.Register(rolldpos.ProtocolID, rp))
bc := NewBlockchain(cfg, InMemStateFactoryOption(), InMemDaoOption(), RegistryOption(®istry), EnableExperimentalActions())
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
bc.Validator().AddActionValidators(acc)
bc.GetFactory().AddActionHandlers(acc)
require.NoError(bc.Start(ctx))
require.NotNil(bc)
height := bc.TipHeight()
require.Equal(0, int(height))
fmt.Printf("Create blockchain pass, height = %d\n", height)
defer func() {
err := bc.Stop(ctx)
require.NoError(err)
}()
// add 4 sample blocks
require.NoError(addTestingTsfBlocks(bc))
height = bc.TipHeight()
require.Equal(5, int(height))
}
func TestBlockchain_MintNewBlock(t *testing.T) {
ctx := context.Background()
cfg := config.Default
cfg.Genesis.BlockGasLimit = uint64(100000)
registry := protocol.Registry{}
acc := account.NewProtocol()
require.NoError(t, registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
bc := NewBlockchain(cfg, InMemStateFactoryOption(), InMemDaoOption(), RegistryOption(®istry))
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
exec := execution.NewProtocol(bc)
require.NoError(t, registry.Register(execution.ProtocolID, exec))
bc.Validator().AddActionValidators(acc, exec)
bc.GetFactory().AddActionHandlers(acc, exec)
require.NoError(t, bc.Start(ctx))
defer func() {
require.NoError(t, bc.Stop(ctx))
}()
tsf, err := action.NewTransfer(
1,
big.NewInt(100000000),
ta.Addrinfo["producer"].String(),
[]byte{}, uint64(100000),
big.NewInt(10),
)
require.NoError(t, err)
data, _ := hex.DecodeString("608060405234801561001057600080fd5b5060df8061001f6000396000f3006080604052600436106049576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806360fe47b114604e5780636d4ce63c146078575b600080fd5b348015605957600080fd5b5060766004803603810190808035906020019092919050505060a0565b005b348015608357600080fd5b50608a60aa565b6040518082815260200191505060405180910390f35b8060008190555050565b600080549050905600a165627a7a7230582002faabbefbbda99b20217cf33cb8ab8100caf1542bf1f48117d72e2c59139aea0029")
execution, err := action.NewExecution(action.EmptyAddress, 2, big.NewInt(0), uint64(100000), big.NewInt(0), data)
require.NoError(t, err)
bd := &action.EnvelopeBuilder{}
elp1 := bd.SetAction(tsf).
SetNonce(1).
SetGasLimit(100000).
SetGasPrice(big.NewInt(10)).Build()
selp1, err := action.Sign(elp1, identityset.PrivateKey(0))
require.NoError(t, err)
// This execution should not be included in block because block is out of gas
elp2 := bd.SetAction(execution).
SetNonce(2).
SetGasLimit(100000).
SetGasPrice(big.NewInt(10)).Build()
selp2, err := action.Sign(elp2, identityset.PrivateKey(0))
require.NoError(t, err)
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[identityset.Address(0).String()] = []action.SealedEnvelope{selp1, selp2}
blk, err := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.NoError(t, err)
require.Equal(t, 2, len(blk.Actions))
require.Equal(t, 1, len(blk.Receipts))
var gasConsumed uint64
for _, receipt := range blk.Receipts {
gasConsumed += receipt.GasConsumed
}
require.True(t, gasConsumed <= cfg.Genesis.BlockGasLimit)
}
func TestBlockchain_MintNewBlock_PopAccount(t *testing.T) {
ctx := context.Background()
cfg := config.Default
registry := protocol.Registry{}
acc := account.NewProtocol()
require.NoError(t, registry.Register(account.ProtocolID, acc))
bc := NewBlockchain(cfg, InMemStateFactoryOption(), InMemDaoOption(), RegistryOption(®istry), EnableExperimentalActions())
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
bc.Validator().AddActionValidators(acc)
bc.GetFactory().AddActionHandlers(acc)
require.NoError(t, bc.Start(ctx))
defer func() {
require.NoError(t, bc.Stop(ctx))
}()
addr0 := ta.Addrinfo["producer"].String()
priKey0 := ta.Keyinfo["producer"].PriKey
addr1 := ta.Addrinfo["alfa"].String()
addr3 := ta.Addrinfo["charlie"].String()
priKey3 := ta.Keyinfo["charlie"].PriKey
require.NoError(t, addTestingTsfBlocks(bc))
// test third block
bytes := []byte{}
for i := 0; i < 1000; i++ {
bytes = append(bytes, 1)
}
actionMap := make(map[string][]action.SealedEnvelope)
actions := make([]action.SealedEnvelope, 0)
for i := uint64(0); i < 300; i++ {
tsf, err := testutil.SignedTransfer(addr1, priKey0, i+7, big.NewInt(2), bytes,
1000000, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(t, err)
actions = append(actions, tsf)
}
actionMap[addr0] = actions
transfer1, err := testutil.SignedTransfer(addr1, priKey3, 7, big.NewInt(2),
[]byte{}, 100000, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(t, err)
actionMap[addr3] = []action.SealedEnvelope{transfer1}
blk, err := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.NoError(t, err)
require.NotNil(t, blk)
require.Equal(t, 183, len(blk.Actions))
whetherInclude := false
for _, action := range blk.Actions {
if transfer1.Hash() == action.Hash() {
whetherInclude = true
break
}
}
require.True(t, whetherInclude)
}
// MockSubscriber counts the transfer actions it sees in committed blocks.
type MockSubscriber struct {
counter int
mu sync.RWMutex
}
func (ms *MockSubscriber) HandleBlock(blk *block.Block) error {
ms.mu.Lock()
tsfs, _ := action.ClassifyActions(blk.Actions)
ms.counter += len(tsfs)
ms.mu.Unlock()
return nil
}
func (ms *MockSubscriber) Counter() int {
ms.mu.RLock()
defer ms.mu.RUnlock()
return ms.counter
}
func TestLoadBlockchainfromDB(t *testing.T) {
require := require.New(t)
ctx := context.Background()
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
cfg := config.Default
cfg.Plugins[config.GatewayPlugin] = true
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
sf.AddActionHandlers(account.NewProtocol())
// Create a blockchain from scratch
registry := protocol.Registry{}
acc := account.NewProtocol()
require.NoError(registry.Register(account.ProtocolID, acc))
bc := NewBlockchain(
cfg,
PrecreatedStateFactoryOption(sf),
BoltDBDaoOption(),
RegistryOption(®istry),
EnableExperimentalActions(),
)
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(registry.Register(rolldpos.ProtocolID, rp))
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
bc.Validator().AddActionValidators(acc)
sf.AddActionHandlers(acc)
require.NoError(bc.Start(ctx))
require.NoError(addCreatorToFactory(sf))
ms := &MockSubscriber{counter: 0}
err = bc.AddSubscriber(ms)
require.NoError(err)
require.Equal(0, ms.Counter())
height := bc.TipHeight()
fmt.Printf("Open blockchain pass, height = %d\n", height)
require.Nil(addTestingTsfBlocks(bc))
err = bc.Stop(ctx)
require.NoError(err)
require.Equal(24, ms.Counter())
// Load a blockchain from DB
sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
accountProtocol := account.NewProtocol()
sf.AddActionHandlers(accountProtocol)
registry = protocol.Registry{}
require.NoError(registry.Register(account.ProtocolID, accountProtocol))
bc = NewBlockchain(
cfg,
PrecreatedStateFactoryOption(sf),
BoltDBDaoOption(),
EnableExperimentalActions(),
)
rolldposProtocol := rolldpos.NewProtocol(
genesis.Default.NumCandidateDelegates,
genesis.Default.NumDelegates,
genesis.Default.NumSubEpochs,
)
require.NoError(registry.Register(rolldpos.ProtocolID, rolldposProtocol))
rewardingProtocol := rewarding.NewProtocol(bc, rolldposProtocol)
require.NoError(registry.Register(rewarding.ProtocolID, rewardingProtocol))
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, 0))
bc.Validator().AddActionValidators(accountProtocol)
require.NoError(bc.Start(ctx))
defer func() {
require.NoError(bc.Stop(ctx))
}()
hash1, err := bc.GetHashByHeight(1)
require.NoError(err)
height, err = bc.GetHeightByHash(hash1)
require.NoError(err)
require.Equal(uint64(1), height)
header, err := bc.BlockHeaderByHash(hash1)
require.NoError(err)
require.Equal(hash1, header.HashBlock())
fmt.Printf("block 1 hash = %x\n", hash1)
hash2, err := bc.GetHashByHeight(2)
require.NoError(err)
height, err = bc.GetHeightByHash(hash2)
require.NoError(err)
require.Equal(uint64(2), height)
header, err = bc.BlockHeaderByHash(hash2)
require.NoError(err)
require.Equal(hash2, header.HashBlock())
fmt.Printf("block 2 hash = %x\n", hash2)
hash3, err := bc.GetHashByHeight(3)
require.NoError(err)
height, err = bc.GetHeightByHash(hash3)
require.NoError(err)
require.Equal(uint64(3), height)
header, err = bc.BlockHeaderByHash(hash3)
require.NoError(err)
require.Equal(hash3, header.HashBlock())
fmt.Printf("block 3 hash = %x\n", hash3)
hash4, err := bc.GetHashByHeight(4)
require.NoError(err)
height, err = bc.GetHeightByHash(hash4)
require.NoError(err)
require.Equal(uint64(4), height)
header, err = bc.BlockHeaderByHash(hash4)
require.NoError(err)
require.Equal(hash4, header.HashBlock())
fmt.Printf("block 4 hash = %x\n", hash4)
hash5, err := bc.GetHashByHeight(5)
require.NoError(err)
height, err = bc.GetHeightByHash(hash5)
require.NoError(err)
require.Equal(uint64(5), height)
header, err = bc.BlockHeaderByHash(hash5)
require.NoError(err)
require.Equal(hash5, header.HashBlock())
fmt.Printf("block 5 hash = %x\n", hash5)
empblk, err := bc.GetBlockByHash(hash.ZeroHash256)
require.Nil(empblk)
require.Error(err)
header, err = bc.BlockHeaderByHeight(60000)
require.Nil(header)
require.Error(err)
// add wrong blocks
h := bc.TipHeight()
blkhash := bc.TipHash()
header, err = bc.BlockHeaderByHeight(h)
require.NoError(err)
require.Equal(blkhash, header.HashBlock())
fmt.Printf("Current tip = %d hash = %x\n", h, blkhash)
// add block with wrong height
selp, err := testutil.SignedTransfer(ta.Addrinfo["bravo"].String(), ta.Keyinfo["producer"].PriKey, 1, big.NewInt(50), nil, genesis.Default.ActionGasLimit, big.NewInt(0))
require.NoError(err)
nblk, err := block.NewTestingBuilder().
SetHeight(h+2).
SetPrevBlockHash(blkhash).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
err = bc.ValidateBlock(&nblk)
require.Error(err)
fmt.Printf("Cannot validate block %d: %v\n", header.Height(), err)
// add block with zero prev hash
selp2, err := testutil.SignedTransfer(ta.Addrinfo["bravo"].String(), ta.Keyinfo["producer"].PriKey, 1, big.NewInt(50), nil, genesis.Default.ActionGasLimit, big.NewInt(0))
require.NoError(err)
nblk, err = block.NewTestingBuilder().
SetHeight(h+1).
SetPrevBlockHash(hash.ZeroHash256).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp2).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
err = bc.ValidateBlock(&nblk)
require.Error(err)
fmt.Printf("Cannot validate block %d: %v\n", header.Height(), err)
// add existing block again will have no effect
blk, err := bc.GetBlockByHeight(3)
require.NotNil(blk)
require.NoError(err)
require.NoError(bc.(*blockchain).commitBlock(blk))
fmt.Printf("Cannot add block 3 again: %v\n", err)
// check all Tx from block 4
header, err = bc.BlockHeaderByHeight(5)
require.NoError(err)
require.Equal(hash5, header.HashBlock())
_, err = bc.StateByAddr("")
require.Error(err)
}
func TestLoadBlockchainfromDBWithoutExplorer(t *testing.T) {
require := require.New(t)
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
ctx := context.Background()
cfg := config.Default
cfg.DB.UseBadgerDB = false // test with boltDB
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
sf.AddActionHandlers(account.NewProtocol())
// Create a blockchain from scratch
registry := protocol.Registry{}
acc := account.NewProtocol()
require.NoError(registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(registry.Register(rolldpos.ProtocolID, rp))
bc := NewBlockchain(
cfg,
PrecreatedStateFactoryOption(sf),
BoltDBDaoOption(),
RegistryOption(®istry),
EnableExperimentalActions(),
)
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, genesis.Default.ActionGasLimit))
bc.Validator().AddActionValidators(acc)
sf.AddActionHandlers(acc)
require.NoError(bc.Start(ctx))
require.NoError(addCreatorToFactory(sf))
ms := &MockSubscriber{counter: 0}
err = bc.AddSubscriber(ms)
require.NoError(err)
require.Equal(0, ms.counter)
err = bc.RemoveSubscriber(ms)
require.NoError(err)
height := bc.TipHeight()
fmt.Printf("Open blockchain pass, height = %d\n", height)
require.Nil(addTestingTsfBlocks(bc))
err = bc.Stop(ctx)
require.NoError(err)
require.Equal(0, ms.counter)
// Load a blockchain from DB
sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
sf.AddActionHandlers(account.NewProtocol())
bc = NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, 0))
bc.Validator().AddActionValidators(account.NewProtocol())
require.NoError(bc.Start(ctx))
defer func() {
err := bc.Stop(ctx)
require.NoError(err)
}()
require.NotNil(bc)
// check hash<-->height mapping
hash1, err := bc.GetHashByHeight(1)
require.NoError(err)
height, err = bc.GetHeightByHash(hash1)
require.NoError(err)
require.Equal(uint64(1), height)
blk, err := bc.GetBlockByHash(hash1)
require.NoError(err)
require.Equal(hash1, blk.HashBlock())
fmt.Printf("block 1 hash = %x\n", hash1)
hash2, err := bc.GetHashByHeight(2)
require.NoError(err)
height, err = bc.GetHeightByHash(hash2)
require.NoError(err)
require.Equal(uint64(2), height)
blk, err = bc.GetBlockByHash(hash2)
require.NoError(err)
require.Equal(hash2, blk.HashBlock())
fmt.Printf("block 2 hash = %x\n", hash2)
hash3, err := bc.GetHashByHeight(3)
require.NoError(err)
height, err = bc.GetHeightByHash(hash3)
require.NoError(err)
require.Equal(uint64(3), height)
blk, err = bc.GetBlockByHash(hash3)
require.NoError(err)
require.Equal(hash3, blk.HashBlock())
fmt.Printf("block 3 hash = %x\n", hash3)
hash4, err := bc.GetHashByHeight(4)
require.NoError(err)
height, err = bc.GetHeightByHash(hash4)
require.NoError(err)
require.Equal(uint64(4), height)
blk, err = bc.GetBlockByHash(hash4)
require.NoError(err)
require.Equal(hash4, blk.HashBlock())
fmt.Printf("block 4 hash = %x\n", hash4)
empblk, err := bc.GetBlockByHash(hash.ZeroHash256)
require.Nil(empblk)
require.NotNil(err.Error())
blk, err = bc.GetBlockByHeight(60000)
require.Nil(blk)
require.Error(err)
// add wrong blocks
h := bc.TipHeight()
blkhash := bc.TipHash()
blk, err = bc.GetBlockByHeight(h)
require.NoError(err)
require.Equal(blkhash, blk.HashBlock())
fmt.Printf("Current tip = %d hash = %x\n", h, blkhash)
// add block with wrong height
selp, err := testutil.SignedTransfer(ta.Addrinfo["bravo"].String(), ta.Keyinfo["producer"].PriKey, 1, big.NewInt(50), nil, genesis.Default.ActionGasLimit, big.NewInt(0))
require.NoError(err)
nblk, err := block.NewTestingBuilder().
SetHeight(h+2).
SetPrevBlockHash(blkhash).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
err = bc.ValidateBlock(&nblk)
require.Error(err)
fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err)
// add block with zero prev hash
selp2, err := testutil.SignedTransfer(ta.Addrinfo["bravo"].String(), ta.Keyinfo["producer"].PriKey, 1, big.NewInt(50), nil, genesis.Default.ActionGasLimit, big.NewInt(0))
require.NoError(err)
nblk, err = block.NewTestingBuilder().
SetHeight(h+1).
SetPrevBlockHash(hash.ZeroHash256).
SetTimeStamp(testutil.TimestampNow()).
AddActions(selp2).SignAndBuild(ta.Keyinfo["bravo"].PubKey, ta.Keyinfo["bravo"].PriKey)
require.NoError(err)
err = bc.ValidateBlock(&nblk)
require.Error(err)
fmt.Printf("Cannot validate block %d: %v\n", blk.Height(), err)
	// adding an existing block again will have no effect
blk, err = bc.GetBlockByHeight(3)
require.NotNil(blk)
require.NoError(err)
require.NoError(bc.(*blockchain).commitBlock(blk))
fmt.Printf("Cannot add block 3 again: %v\n", err)
	// check block 4 hash
blk, err = bc.GetBlockByHeight(4)
require.NoError(err)
require.Equal(hash4, blk.HashBlock())
_, err = bc.StateByAddr("")
require.Error(err)
}
func TestBlockchain_Validator(t *testing.T) {
cfg := config.Default
// disable account-based testing
cfg.Chain.TrieDBPath = ""
ctx := context.Background()
bc := NewBlockchain(cfg, InMemDaoOption(), InMemStateFactoryOption())
require.NoError(t, bc.Start(ctx))
defer func() {
err := bc.Stop(ctx)
require.Nil(t, err)
}()
require.NotNil(t, bc)
val := bc.Validator()
require.NotNil(t, bc)
bc.SetValidator(val)
require.NotNil(t, bc.Validator())
}
func TestBlockchainInitialCandidate(t *testing.T) {
require := require.New(t)
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
cfg := config.Default
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption())
require.NoError(err)
accountProtocol := account.NewProtocol()
sf.AddActionHandlers(accountProtocol)
registry := protocol.Registry{}
require.NoError(registry.Register(account.ProtocolID, accountProtocol))
bc := NewBlockchain(
cfg,
PrecreatedStateFactoryOption(sf),
BoltDBDaoOption(),
RegistryOption(®istry),
)
rolldposProtocol := rolldpos.NewProtocol(
genesis.Default.NumCandidateDelegates,
genesis.Default.NumDelegates,
genesis.Default.NumSubEpochs,
)
require.NoError(registry.Register(rolldpos.ProtocolID, rolldposProtocol))
rewardingProtocol := rewarding.NewProtocol(bc, rolldposProtocol)
require.NoError(registry.Register(rewarding.ProtocolID, rewardingProtocol))
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
// TODO: we will fix this test case by testing using lifeLongDelegatesProtocol to initialize the candidates
// candidate, err := sf.CandidatesByHeight(0)
}
func TestBlockchain_StateByAddr(t *testing.T) {
require := require.New(t)
cfg := config.Default
// disable account-based testing
// create chain
bc := NewBlockchain(cfg, InMemDaoOption(), InMemStateFactoryOption())
require.NoError(bc.Start(context.Background()))
require.NotNil(bc)
_, err := bc.CreateState(identityset.Address(0).String(), big.NewInt(100))
require.NoError(err)
s, err := bc.StateByAddr(identityset.Address(0).String())
require.NoError(err)
require.Equal(uint64(0), s.Nonce)
require.Equal(big.NewInt(100), s.Balance)
require.Equal(hash.ZeroHash256, s.Root)
require.Equal([]byte(nil), s.CodeHash)
require.Equal(false, s.IsCandidate)
require.Equal(big.NewInt(0), s.VotingWeight)
require.Equal("", s.Votee)
}
func TestBlocks(t *testing.T) {
	// This test is used for benchmarking block commit and verification
t.Skip()
require := require.New(t)
cfg := config.Default
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, _ := factory.NewFactory(cfg, factory.InMemTrieOption())
// Create a blockchain from scratch
bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption())
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
require.NoError(addCreatorToFactory(sf))
a := ta.Addrinfo["alfa"].String()
priKeyA := ta.Keyinfo["alfa"].PriKey
c := ta.Addrinfo["bravo"].String()
ws, err := sf.NewWorkingSet()
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, a, big.NewInt(100000))
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, c, big.NewInt(100000))
require.NoError(err)
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: ta.Addrinfo["producer"],
GasLimit: gasLimit,
})
_, err = ws.RunActions(ctx, 0, nil)
require.NoError(err)
require.NoError(sf.Commit(ws))
for i := 0; i < 10; i++ {
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[a] = []action.SealedEnvelope{}
for i := 0; i < 1000; i++ {
tsf, err := testutil.SignedTransfer(c, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(err)
actionMap[a] = append(actionMap[a], tsf)
}
blk, _ := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.Nil(bc.ValidateBlock(blk))
require.Nil(bc.CommitBlock(blk))
}
}
func TestActions(t *testing.T) {
	// This test is used for benchmarking block verification
t.Skip()
require := require.New(t)
cfg := config.Default
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
sf, _ := factory.NewFactory(cfg, factory.InMemTrieOption())
// Create a blockchain from scratch
bc := NewBlockchain(cfg, PrecreatedStateFactoryOption(sf), BoltDBDaoOption(), EnableExperimentalActions())
require.NoError(bc.Start(context.Background()))
defer func() {
require.NoError(bc.Stop(context.Background()))
}()
require.NoError(addCreatorToFactory(sf))
a := ta.Addrinfo["alfa"].String()
priKeyA := ta.Keyinfo["alfa"].PriKey
c := ta.Addrinfo["bravo"].String()
ws, err := sf.NewWorkingSet()
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, a, big.NewInt(100000))
require.NoError(err)
_, err = accountutil.LoadOrCreateAccount(ws, c, big.NewInt(100000))
require.NoError(err)
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: ta.Addrinfo["producer"],
GasLimit: gasLimit,
})
_, err = ws.RunActions(ctx, 0, nil)
require.NoError(err)
require.NoError(sf.Commit(ws))
val := &validator{sf: sf, validatorAddr: "", enableExperimentalActions: true}
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc, 0))
bc.Validator().AddActionValidators(account.NewProtocol())
actionMap := make(map[string][]action.SealedEnvelope)
for i := 0; i < 5000; i++ {
tsf, err := testutil.SignedTransfer(c, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(err)
actionMap[a] = append(actionMap[a], tsf)
tsf2, err := testutil.SignedTransfer(a, priKeyA, 1, big.NewInt(2), []byte{}, testutil.TestGasLimit, big.NewInt(testutil.TestGasPriceInt64))
require.NoError(err)
actionMap[a] = append(actionMap[a], tsf2)
}
blk, _ := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.Nil(val.Validate(blk, 0, blk.PrevHash()))
}
func addCreatorToFactory(sf factory.Factory) error {
ws, err := sf.NewWorkingSet()
if err != nil {
return err
}
if _, err = accountutil.LoadOrCreateAccount(
ws,
ta.Addrinfo["producer"].String(),
unit.ConvertIotxToRau(10000000000),
); err != nil {
return err
}
gasLimit := testutil.TestGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
Producer: ta.Addrinfo["producer"],
GasLimit: gasLimit,
})
if _, err = ws.RunActions(ctx, 0, nil); err != nil {
return err
}
return sf.Commit(ws)
}
| 1 | 17,651 | fix the group | iotexproject-iotex-core | go |
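In Go reviews, "fix the group" most often refers to import grouping. A minimal sketch of the convention goimports enforces, with illustrative paths rather than the actual diff: standard library first, then third-party modules, then project-local packages, each group separated by a blank line.

import (
	"math/big"
	"testing"

	"github.com/stretchr/testify/require"

	// project-local packages last (path shown for illustration)
	"github.com/iotexproject/iotex-core/blockchain/block"
)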
@@ -59,6 +59,13 @@ public class NodeStatus {
}
}
+ public boolean hasCapability(Capabilities caps) {
+ long count = slots.stream()
+ .filter(slot -> slot.isSupporting(caps))
+ .count();
+ return count > 0;
+ }
+
public boolean hasCapacity() {
return slots.stream().anyMatch(slot -> !slot.getSession().isPresent());
  }
| 1 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.data;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.json.JsonInput;
import org.openqa.selenium.json.TypeToken;
import java.net.URI;
import java.time.Instant;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
public class NodeStatus {
private final NodeId nodeId;
private final URI externalUri;
private final int maxSessionCount;
private final Set<Slot> slots;
private final Availability availability;
public NodeStatus(
NodeId nodeId,
URI externalUri,
int maxSessionCount,
Set<Slot> slots,
Availability availability) {
this.nodeId = Require.nonNull("Node id", nodeId);
this.externalUri = Require.nonNull("URI", externalUri);
this.maxSessionCount = Require.positive("Max session count",
maxSessionCount,
"Make sure that a driver is available on $PATH");
this.slots = ImmutableSet.copyOf(Require.nonNull("Slots", slots));
this.availability = Require.nonNull("Availability", availability);
ImmutableSet.Builder<Session> sessions = ImmutableSet.builder();
for (Slot slot : slots) {
slot.getSession().ifPresent(sessions::add);
}
}
public boolean hasCapacity() {
return slots.stream().anyMatch(slot -> !slot.getSession().isPresent());
}
public boolean hasCapacity(Capabilities caps) {
long count = slots.stream()
.filter(slot -> !slot.getSession().isPresent())
.filter(slot -> slot.isSupporting(caps))
.count();
return count > 0;
}
public NodeId getId() {
return nodeId;
}
public URI getUri() {
return externalUri;
}
public int getMaxSessionCount() {
return maxSessionCount;
}
public Set<Slot> getSlots() {
return slots;
}
public Availability getAvailability() {
return availability;
}
public float getLoad() {
float inUse = slots.parallelStream()
.filter(slot -> slot.getSession().isPresent())
.count();
return (inUse / (float) maxSessionCount) * 100f;
}
public long getLastSessionCreated() {
return slots.parallelStream()
.map(Slot::getLastStarted)
.mapToLong(Instant::toEpochMilli)
.max()
.orElse(0);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof NodeStatus)) {
return false;
}
NodeStatus that = (NodeStatus) o;
return Objects.equals(this.nodeId, that.nodeId) &&
Objects.equals(this.externalUri, that.externalUri) &&
this.maxSessionCount == that.maxSessionCount &&
Objects.equals(this.slots, that.slots) &&
Objects.equals(this.availability, that.availability);
}
@Override
public int hashCode() {
return Objects.hash(nodeId, externalUri, maxSessionCount, slots);
}
private Map<String, Object> toJson() {
return new ImmutableMap.Builder<String, Object>()
.put("id", nodeId)
.put("uri", externalUri)
.put("maxSessions", maxSessionCount)
.put("slots", slots)
.put("availability", availability)
.build();
}
public static NodeStatus fromJson(JsonInput input) {
NodeId nodeId = null;
URI uri = null;
int maxSessions = 0;
Set<Slot> slots = null;
Availability availability = null;
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "availability":
availability = input.read(Availability.class);
break;
case "id":
nodeId = input.read(NodeId.class);
break;
case "maxSessions":
maxSessions = input.read(Integer.class);
break;
case "slots":
slots = input.read(new TypeToken<Set<Slot>>(){}.getType());
break;
case "uri":
uri = input.read(URI.class);
break;
default:
input.skipValue();
break;
}
}
input.endObject();
return new NodeStatus(
nodeId,
uri,
maxSessions,
slots,
availability);
}
}
| 1 | 18,174 | Prefer `Stream.anyMatch` instead of iterating over all slots. | SeleniumHQ-selenium | java
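Sketch of the suggested rewrite (an illustration, not the committed fix): Stream.anyMatch short-circuits on the first supporting slot instead of materializing a count.

  // Hypothetical revision of the new hasCapability method above.
  public boolean hasCapability(Capabilities caps) {
    return slots.stream().anyMatch(slot -> slot.isSupporting(caps));
  }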
@@ -47,7 +47,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) {
// Create a network and block service
net := &httpTestPeerSource{}
- ls := rpcs.MakeBlockService(config.GetDefaultLocal(), remote, net, "test genesisID")
+ ls := rpcs.MakeBlockService(logging.Base(), config.GetDefaultLocal(), remote, net, "test genesisID")
nodeA := basicRPCNode{}
nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
	nodeA.start()
| 1 |
// Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package catchup
import (
"math/rand"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/datatest"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
"github.com/algorand/go-algorand/rpcs"
)
func BenchmarkServiceFetchBlocks(b *testing.B) {
b.StopTimer()
// Make Ledger
remote, local, release, genesisBalances := benchenv(b, 100, 500)
defer release()
require.NotNil(b, remote)
require.NotNil(b, local)
// Create a network and block service
net := &httpTestPeerSource{}
ls := rpcs.MakeBlockService(config.GetDefaultLocal(), remote, net, "test genesisID")
nodeA := basicRPCNode{}
nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
nodeA.start()
defer nodeA.stop()
rootURL := nodeA.rootURL()
net.addPeer(rootURL)
cfg := config.GetDefaultLocal()
cfg.Archival = true
for i := 0; i < b.N; i++ {
inMem := true
local, err := data.LoadLedger(logging.Base(), b.Name()+"empty"+strconv.Itoa(i), inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg)
require.NoError(b, err)
// Make Service
syncer := MakeService(logging.Base(), defaultConfig, net, local, new(mockedAuthenticator), nil)
b.StartTimer()
syncer.Start()
for w := 0; w < 1000; w++ {
if remote.LastRound() == local.LastRound() {
break
}
time.Sleep(10 * time.Millisecond)
}
b.StopTimer()
syncer.Stop()
require.Equal(b, remote.LastRound(), local.LastRound())
local.Close()
}
}
// benchenv builds a remote ledger populated with numBlocks fabricated blocks plus an empty
// local ledger, and returns them together with a release func and the genesis balances.
func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *data.Ledger, release func(), genesisBalances data.GenesisBalances) {
P := numAccounts // n accounts
maxMoneyAtStart := uint64(10 * defaultRewardUnit) // max money start
minMoneyAtStart := uint64(defaultRewardUnit) // min money start
	accessors := make([]db.Accessor, 0)
release = func() {
ledger.Close()
emptyLedger.Close()
		for _, acc := range accessors {
acc.Close()
}
}
// generate accounts
genesis := make(map[basics.Address]basics.AccountData)
gen := rand.New(rand.NewSource(2))
parts := make([]account.Participation, P)
for i := 0; i < P; i++ {
access, err := db.MakeAccessor(t.Name()+"_root_benchenv"+strconv.Itoa(i), false, true)
if err != nil {
panic(err)
}
		accessors = append(accessors, access)
root, err := account.GenerateRoot(access)
if err != nil {
panic(err)
}
access, err = db.MakeAccessor(t.Name()+"_part_benchenv"+strconv.Itoa(i), false, true)
if err != nil {
panic(err)
}
		accessors = append(accessors, access)
part, err := account.FillDBWithParticipationKeys(access, root.Address(), 0, basics.Round(numBlocks),
config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
if err != nil {
panic(err)
}
startamt := basics.AccountData{
Status: basics.Online,
MicroAlgos: basics.MicroAlgos{Raw: uint64(minMoneyAtStart + (gen.Uint64() % (maxMoneyAtStart - minMoneyAtStart)))},
SelectionID: part.VRFSecrets().PK,
VoteID: part.VotingSecrets().OneTimeSignatureVerifier,
}
short := root.Address()
parts[i] = part
genesis[short] = startamt
}
genesis[basics.Address(sinkAddr)] = basics.AccountData{
Status: basics.NotParticipating,
MicroAlgos: basics.MicroAlgos{Raw: uint64(1e3 * minMoneyAtStart)},
}
genesis[basics.Address(poolAddr)] = basics.AccountData{
Status: basics.NotParticipating,
MicroAlgos: basics.MicroAlgos{Raw: uint64(1e3 * minMoneyAtStart)},
}
var err error
genesisBalances = data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
const inMem = true
cfg := config.GetDefaultLocal()
cfg.Archival = true
emptyLedger, err = data.LoadLedger(logging.Base(), t.Name()+"empty", inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg)
require.NoError(t, err)
ledger, err = datatest.FabricateLedger(logging.Base(), t.Name(), parts, genesisBalances, emptyLedger.LastRound()+basics.Round(numBlocks))
require.NoError(t, err)
require.Equal(t, ledger.LastRound(), emptyLedger.LastRound()+basics.Round(numBlocks))
return ledger, emptyLedger, release, genesisBalances
}
| 1 | 42,224 | we have `logging.TestingLog()`, which should be used for that purpose. | algorand-go-algorand | go
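Sketched against the patched line above, and assuming logging.TestingLog accepts the benchmark's testing.TB (a hypothetical signature; check the logging package):

	log := logging.TestingLog(b) // scope log output to this benchmark
	ls := rpcs.MakeBlockService(log, config.GetDefaultLocal(), remote, net, "test genesisID")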
@@ -2372,7 +2372,7 @@ int mg_url_decode(const char *src, int src_len, char *dst,
#define HEXTOI(x) (isdigit(x) ? x - '0' : x - 'W')
for (i = j = 0; i < src_len && j < dst_len - 1; i++, j++) {
- if (src[i] == '%' && i < src_len - 2 &&
+ if (i < src_len - 2 && src[i] == '%' &&
isxdigit(* (const unsigned char *) (src + i + 1)) &&
isxdigit(* (const unsigned char *) (src + i + 2))) {
      a = tolower(* (const unsigned char *) (src + i + 1));
| 1 |
// Copyright (c) 2004-2013 Sergey Lyubka <[email protected]>
// Copyright (c) 2013-2014 Cesanta Software Limited
// All rights reserved
//
// This library is dual-licensed: you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation. For the terms of this
// license, see <http://www.gnu.org/licenses/>.
//
// You are free to use this library under the terms of the GNU General
// Public License, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU General Public License for more details.
//
// Alternatively, you can license this library under a commercial
// license, as set out in <http://cesanta.com/>.
#ifdef NOEMBED_NET_SKELETON
#include "net_skeleton.h"
#else
// net_skeleton start
// Copyright (c) 2014 Cesanta Software Limited
// All rights reserved
//
// This software is dual-licensed: you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation. For the terms of this
// license, see <http://www.gnu.org/licenses/>.
//
// You are free to use this software under the terms of the GNU General
// Public License, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU General Public License for more details.
//
// Alternatively, you can license this software under a commercial
// license, as set out in <http://cesanta.com/>.
//
// $Date: 2014-09-28 05:04:41 UTC $
#ifndef NS_SKELETON_HEADER_INCLUDED
#define NS_SKELETON_HEADER_INCLUDED
#define NS_SKELETON_VERSION "2.1.0"
#undef UNICODE // Use ANSI WinAPI functions
#undef _UNICODE // Use multibyte encoding on Windows
#define _INTEGRAL_MAX_BITS 64 // Enable _stati64() on Windows
#undef WIN32_LEAN_AND_MEAN // Let windows.h always include winsock2.h
#define _XOPEN_SOURCE 600 // For flockfile() on Linux
#define __STDC_FORMAT_MACROS // <inttypes.h> wants this for C++
#define __STDC_LIMIT_MACROS // C++ wants that for INT64_MAX
#ifndef _LARGEFILE_SOURCE
#define _LARGEFILE_SOURCE // Enable fseeko() and ftello() functions
#endif
#define _FILE_OFFSET_BITS 64 // Enable 64-bit file offsets
#ifdef _MSC_VER
#pragma warning (disable : 4127) // FD_SET() emits warning, disable it
#pragma warning (disable : 4204) // missing c99 support
#pragma warning (disable : 4267) // conversion from 'size_t' to 'int', possible loss of data
#pragma warning (disable : 4244) // conversion from '__int64' to 'int', possible loss of data
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <signal.h>
#ifdef _WIN32
#ifdef _MSC_VER
#pragma comment(lib, "ws2_32.lib") // Linking with winsock library
#endif
#include <windows.h>
#include <process.h>
#ifndef EINPROGRESS
#define EINPROGRESS WSAEINPROGRESS
#endif
#ifndef EWOULDBLOCK
#define EWOULDBLOCK WSAEWOULDBLOCK
#endif
#ifndef __func__
#define STRX(x) #x
#define STR(x) STRX(x)
#define __func__ __FILE__ ":" STR(__LINE__)
#endif
#ifndef va_copy
#define va_copy(x,y) x = y
#endif // MINGW #defines va_copy
#define snprintf _snprintf
#define vsnprintf _vsnprintf
#define sleep(x) Sleep((x) * 1000)
#define to64(x) _atoi64(x)
typedef int socklen_t;
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef unsigned short uint16_t;
typedef unsigned __int64 uint64_t;
typedef __int64 int64_t;
typedef SOCKET sock_t;
typedef struct _stati64 ns_stat_t;
#ifndef S_ISDIR
#define S_ISDIR(x) ((x) & _S_IFDIR)
#endif
#else
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <pthread.h>
#include <stdarg.h>
#include <unistd.h>
#include <arpa/inet.h> // For inet_pton() when NS_ENABLE_IPV6 is defined
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/select.h>
#define closesocket(x) close(x)
#define __cdecl
#define INVALID_SOCKET (-1)
#define to64(x) strtoll(x, NULL, 10)
typedef int sock_t;
typedef struct stat ns_stat_t;
#endif
#ifdef NS_ENABLE_DEBUG
#define DBG(x) do { printf("%-20s ", __func__); printf x; putchar('\n'); \
fflush(stdout); } while(0)
#else
#define DBG(x)
#endif
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0]))
#endif
#ifdef NS_ENABLE_SSL
#ifdef __APPLE__
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#include <openssl/ssl.h>
#else
typedef void *SSL;
typedef void *SSL_CTX;
#endif
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
union socket_address {
struct sockaddr sa;
struct sockaddr_in sin;
#ifdef NS_ENABLE_IPV6
struct sockaddr_in6 sin6;
#else
struct sockaddr sin6;
#endif
};
// Describes chunk of memory
struct ns_str {
const char *p;
size_t len;
};
// IO buffers interface
struct iobuf {
char *buf;
size_t len;
size_t size;
};
void iobuf_init(struct iobuf *, size_t initial_size);
void iobuf_free(struct iobuf *);
size_t iobuf_append(struct iobuf *, const void *data, size_t data_size);
void iobuf_remove(struct iobuf *, size_t data_size);
void iobuf_resize(struct iobuf *, size_t new_size);
// Callback function (event handler) prototype, must be defined by user.
// Net skeleton will call event handler, passing events defined above.
struct ns_connection;
typedef void (*ns_callback_t)(struct ns_connection *, int event_num, void *evp);
// Events. Meaning of event parameter (evp) is given in the comment.
#define NS_POLL 0 // Sent to each connection on each call to ns_mgr_poll()
#define NS_ACCEPT 1 // New connection accept()-ed. union socket_address *addr
#define NS_CONNECT 2 // connect() succeeded or failed. int *success_status
#define NS_RECV 3 // Data has been received. int *num_bytes
#define NS_SEND 4 // Data has been written to a socket. int *num_bytes
#define NS_CLOSE 5 // Connection is closed. NULL
struct ns_mgr {
struct ns_connection *active_connections;
const char *hexdump_file; // Debug hexdump file path
sock_t ctl[2]; // Socketpair for mg_wakeup()
void *user_data; // User data
};
struct ns_connection {
struct ns_connection *next, *prev; // ns_mgr::active_connections linkage
struct ns_connection *listener; // Set only for accept()-ed connections
struct ns_mgr *mgr;
sock_t sock; // Socket
union socket_address sa; // Peer address
struct iobuf recv_iobuf; // Received data
struct iobuf send_iobuf; // Data scheduled for sending
SSL *ssl;
SSL_CTX *ssl_ctx;
void *user_data; // User-specific data
void *proto_data; // Application protocol-specific data
time_t last_io_time; // Timestamp of the last socket IO
ns_callback_t callback; // Event handler function
unsigned int flags;
#define NSF_FINISHED_SENDING_DATA (1 << 0)
#define NSF_BUFFER_BUT_DONT_SEND (1 << 1)
#define NSF_SSL_HANDSHAKE_DONE (1 << 2)
#define NSF_CONNECTING (1 << 3)
#define NSF_CLOSE_IMMEDIATELY (1 << 4)
#define NSF_WANT_READ (1 << 5)
#define NSF_WANT_WRITE (1 << 6)
#define NSF_LISTENING (1 << 7)
#define NSF_UDP (1 << 8)
#define NSF_USER_1 (1 << 20)
#define NSF_USER_2 (1 << 21)
#define NSF_USER_3 (1 << 22)
#define NSF_USER_4 (1 << 23)
#define NSF_USER_5 (1 << 24)
#define NSF_USER_6 (1 << 25)
};
void ns_mgr_init(struct ns_mgr *, void *user_data);
void ns_mgr_free(struct ns_mgr *);
time_t ns_mgr_poll(struct ns_mgr *, int milli);
void ns_broadcast(struct ns_mgr *, ns_callback_t, void *, size_t);
struct ns_connection *ns_next(struct ns_mgr *, struct ns_connection *);
struct ns_connection *ns_add_sock(struct ns_mgr *, sock_t,
ns_callback_t, void *);
struct ns_connection *ns_bind(struct ns_mgr *, const char *,
ns_callback_t, void *);
struct ns_connection *ns_connect(struct ns_mgr *, const char *,
ns_callback_t, void *);
int ns_send(struct ns_connection *, const void *buf, int len);
int ns_printf(struct ns_connection *, const char *fmt, ...);
int ns_vprintf(struct ns_connection *, const char *fmt, va_list ap);
// Utility functions
void *ns_start_thread(void *(*f)(void *), void *p);
int ns_socketpair(sock_t [2]);
int ns_socketpair2(sock_t [2], int sock_type); // SOCK_STREAM or SOCK_DGRAM
void ns_set_close_on_exec(sock_t);
void ns_sock_to_str(sock_t sock, char *buf, size_t len, int flags);
int ns_hexdump(const void *buf, int len, char *dst, int dst_len);
int ns_avprintf(char **buf, size_t size, const char *fmt, va_list ap);
int ns_resolve(const char *domain_name, char *ip_addr_buf, size_t buf_len);
#ifdef __cplusplus
}
#endif // __cplusplus
#endif // NS_SKELETON_HEADER_INCLUDED
// Copyright (c) 2014 Cesanta Software Limited
// All rights reserved
//
// This software is dual-licensed: you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation. For the terms of this
// license, see <http://www.gnu.org/licenses/>.
//
// You are free to use this software under the terms of the GNU General
// Public License, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU General Public License for more details.
//
// Alternatively, you can license this software under a commercial
// license, as set out in <http://cesanta.com/>.
//
// $Date: 2014-09-28 05:04:41 UTC $
#ifndef NS_MALLOC
#define NS_MALLOC malloc
#endif
#ifndef NS_REALLOC
#define NS_REALLOC realloc
#endif
#ifndef NS_FREE
#define NS_FREE free
#endif
#ifndef NS_CALLOC
#define NS_CALLOC calloc
#endif
#define NS_CTL_MSG_MESSAGE_SIZE (8 * 1024)
#define NS_READ_BUFFER_SIZE 2048
#define NS_UDP_RECEIVE_BUFFER_SIZE 2000
#define NS_VPRINTF_BUFFER_SIZE 500
struct ctl_msg {
ns_callback_t callback;
char message[NS_CTL_MSG_MESSAGE_SIZE];
};
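// Grow or shrink an iobuf. Shrinking never discards buffered data: a
// new_size below io->len is ignored.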
void iobuf_resize(struct iobuf *io, size_t new_size) {
char *p;
if ((new_size > io->size || (new_size < io->size && new_size >= io->len)) &&
(p = (char *) NS_REALLOC(io->buf, new_size)) != NULL) {
io->size = new_size;
io->buf = p;
}
}
void iobuf_init(struct iobuf *iobuf, size_t initial_size) {
iobuf->len = iobuf->size = 0;
iobuf->buf = NULL;
iobuf_resize(iobuf, initial_size);
}
void iobuf_free(struct iobuf *iobuf) {
if (iobuf != NULL) {
if (iobuf->buf != NULL) NS_FREE(iobuf->buf);
iobuf_init(iobuf, 0);
}
}
size_t iobuf_append(struct iobuf *io, const void *buf, size_t len) {
char *p = NULL;
assert(io != NULL);
assert(io->len <= io->size);
if (len <= 0) {
} else if (io->len + len <= io->size) {
memcpy(io->buf + io->len, buf, len);
io->len += len;
} else if ((p = (char *) NS_REALLOC(io->buf, io->len + len)) != NULL) {
io->buf = p;
memcpy(io->buf + io->len, buf, len);
io->len += len;
io->size = io->len;
} else {
len = 0;
}
return len;
}
void iobuf_remove(struct iobuf *io, size_t n) {
if (n > 0 && n <= io->len) {
memmove(io->buf, io->buf + n, io->len - n);
io->len -= n;
}
}
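// Write-path helper: UDP connections are sent immediately with sendto();
// TCP connections queue data into send_iobuf for the event loop to flush.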
static size_t ns_out(struct ns_connection *nc, const void *buf, size_t len) {
if (nc->flags & NSF_UDP) {
long n = sendto(nc->sock, (const char*)buf, len, 0, &nc->sa.sa, sizeof(nc->sa.sin));
DBG(("%p %d send %ld (%d %s)", nc, nc->sock, n, errno, strerror(errno)));
return n < 0 ? 0 : n;
} else {
return iobuf_append(&nc->send_iobuf, buf, len);
}
}
#ifndef NS_DISABLE_THREADS
void *ns_start_thread(void *(*f)(void *), void *p) {
#ifdef _WIN32
return (void *) _beginthread((void (__cdecl *)(void *)) f, 0, p);
#else
pthread_t thread_id = (pthread_t) 0;
pthread_attr_t attr;
(void) pthread_attr_init(&attr);
(void) pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
#if defined(NS_STACK_SIZE) && NS_STACK_SIZE > 1
(void) pthread_attr_setstacksize(&attr, NS_STACK_SIZE);
#endif
pthread_create(&thread_id, &attr, f, p);
pthread_attr_destroy(&attr);
return (void *) thread_id;
#endif
}
#endif // NS_DISABLE_THREADS
static void ns_add_conn(struct ns_mgr *mgr, struct ns_connection *c) {
c->next = mgr->active_connections;
mgr->active_connections = c;
c->prev = NULL;
if (c->next != NULL) c->next->prev = c;
}
static void ns_remove_conn(struct ns_connection *conn) {
if (conn->prev == NULL) conn->mgr->active_connections = conn->next;
if (conn->prev) conn->prev->next = conn->next;
if (conn->next) conn->next->prev = conn->prev;
}
// Print message to buffer. If buffer is large enough to hold the message,
// return buffer. If buffer is too small, allocate large enough buffer on heap,
// and return allocated buffer.
int ns_avprintf(char **buf, size_t size, const char *fmt, va_list ap) {
va_list ap_copy;
int len;
va_copy(ap_copy, ap);
len = vsnprintf(*buf, size, fmt, ap_copy);
va_end(ap_copy);
if (len < 0) {
// eCos and Windows are not standard-compliant and return -1 when
// the buffer is too small. Keep allocating larger buffers until we
    // succeed or run out of memory.
*buf = NULL;
while (len < 0) {
if (*buf) NS_FREE(*buf);
size *= 2;
if ((*buf = (char *) NS_MALLOC(size)) == NULL) break;
va_copy(ap_copy, ap);
len = vsnprintf(*buf, size, fmt, ap_copy);
va_end(ap_copy);
}
} else if (len > (int) size) {
// Standard-compliant code path. Allocate a buffer that is large enough.
if ((*buf = (char *) NS_MALLOC(len + 1)) == NULL) {
len = -1;
} else {
va_copy(ap_copy, ap);
len = vsnprintf(*buf, len + 1, fmt, ap_copy);
va_end(ap_copy);
}
}
return len;
}
int ns_vprintf(struct ns_connection *nc, const char *fmt, va_list ap) {
char mem[NS_VPRINTF_BUFFER_SIZE], *buf = mem;
int len;
if ((len = ns_avprintf(&buf, sizeof(mem), fmt, ap)) > 0) {
ns_out(nc, buf, len);
}
if (buf != mem && buf != NULL) {
NS_FREE(buf);
}
return len;
}
int ns_printf(struct ns_connection *conn, const char *fmt, ...) {
int len;
va_list ap;
va_start(ap, fmt);
len = ns_vprintf(conn, fmt, ap);
va_end(ap);
return len;
}
static void hexdump(struct ns_connection *nc, const char *path,
int num_bytes, int ev) {
const struct iobuf *io = ev == NS_SEND ? &nc->send_iobuf : &nc->recv_iobuf;
FILE *fp;
char *buf, src[60], dst[60];
int buf_size = num_bytes * 5 + 100;
if ((fp = fopen(path, "a")) != NULL) {
ns_sock_to_str(nc->sock, src, sizeof(src), 3);
ns_sock_to_str(nc->sock, dst, sizeof(dst), 7);
fprintf(fp, "%lu %p %s %s %s %d\n", (unsigned long) time(NULL),
nc->user_data, src,
ev == NS_RECV ? "<-" : ev == NS_SEND ? "->" :
ev == NS_ACCEPT ? "<A" : ev == NS_CONNECT ? "C>" : "XX",
dst, num_bytes);
if (num_bytes > 0 && (buf = (char *) NS_MALLOC(buf_size)) != NULL) {
ns_hexdump(io->buf + (ev == NS_SEND ? 0 : io->len) -
(ev == NS_SEND ? 0 : num_bytes), num_bytes, buf, buf_size);
fprintf(fp, "%s", buf);
NS_FREE(buf);
}
fclose(fp);
}
}
static void ns_call(struct ns_connection *nc, int ev, void *p) {
if (nc->mgr->hexdump_file != NULL && ev != NS_POLL) {
int len = (ev == NS_RECV || ev == NS_SEND) ? * (int *) p : 0;
hexdump(nc, nc->mgr->hexdump_file, len, ev);
}
nc->callback(nc, ev, p);
}
static void ns_destroy_conn(struct ns_connection *conn) {
closesocket(conn->sock);
iobuf_free(&conn->recv_iobuf);
iobuf_free(&conn->send_iobuf);
#ifdef NS_ENABLE_SSL
if (conn->ssl != NULL) {
SSL_free(conn->ssl);
}
if (conn->ssl_ctx != NULL) {
SSL_CTX_free(conn->ssl_ctx);
}
#endif
NS_FREE(conn);
}
static void ns_close_conn(struct ns_connection *conn) {
DBG(("%p %d", conn, conn->flags));
ns_call(conn, NS_CLOSE, NULL);
ns_remove_conn(conn);
ns_destroy_conn(conn);
}
void ns_set_close_on_exec(sock_t sock) {
#ifdef _WIN32
(void) SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0);
#else
fcntl(sock, F_SETFD, FD_CLOEXEC);
#endif
}
static void ns_set_non_blocking_mode(sock_t sock) {
#ifdef _WIN32
unsigned long on = 1;
ioctlsocket(sock, FIONBIO, &on);
#else
int flags = fcntl(sock, F_GETFL, 0);
fcntl(sock, F_SETFL, flags | O_NONBLOCK);
#endif
}
#ifndef NS_DISABLE_SOCKETPAIR
int ns_socketpair2(sock_t sp[2], int sock_type) {
union socket_address sa;
sock_t sock;
socklen_t len = sizeof(sa.sin);
int ret = 0;
sp[0] = sp[1] = INVALID_SOCKET;
(void) memset(&sa, 0, sizeof(sa));
sa.sin.sin_family = AF_INET;
sa.sin.sin_port = htons(0);
sa.sin.sin_addr.s_addr = htonl(0x7f000001);
if ((sock = socket(AF_INET, sock_type, 0)) != INVALID_SOCKET &&
!bind(sock, &sa.sa, len) &&
(sock_type == SOCK_DGRAM || !listen(sock, 1)) &&
!getsockname(sock, &sa.sa, &len) &&
(sp[0] = socket(AF_INET, sock_type, 0)) != INVALID_SOCKET &&
!connect(sp[0], &sa.sa, len) &&
(sock_type == SOCK_STREAM ||
(!getsockname(sp[0], &sa.sa, &len) && !connect(sock, &sa.sa, len))) &&
(sp[1] = (sock_type == SOCK_DGRAM ? sock :
accept(sock, &sa.sa, &len))) != INVALID_SOCKET) {
ns_set_close_on_exec(sp[0]);
ns_set_close_on_exec(sp[1]);
ret = 1;
} else {
if (sp[0] != INVALID_SOCKET) closesocket(sp[0]);
if (sp[1] != INVALID_SOCKET) closesocket(sp[1]);
sp[0] = sp[1] = INVALID_SOCKET;
}
if (sock_type != SOCK_DGRAM) closesocket(sock);
return ret;
}
int ns_socketpair(sock_t sp[2]) {
return ns_socketpair2(sp, SOCK_STREAM);
}
#endif // NS_DISABLE_SOCKETPAIR
// TODO(lsm): use non-blocking resolver
static int ns_resolve2(const char *host, struct in_addr *ina) {
struct hostent *he;
if ((he = gethostbyname(host)) == NULL) {
DBG(("gethostbyname(%s) failed: %s", host, strerror(errno)));
} else {
memcpy(ina, he->h_addr_list[0], sizeof(*ina));
return 1;
}
return 0;
}
// Resolve FQDN "host", store IP address in "buf".
// Return > 0 (IP address length) on success.
int ns_resolve(const char *host, char *buf, size_t n) {
struct in_addr ad;
return ns_resolve2(host, &ad) ? snprintf(buf, n, "%s", inet_ntoa(ad)) : 0;
}
// Address format: [PROTO://][IP_ADDRESS:]PORT[:CERT][:CA_CERT]
static int ns_parse_address(const char *str, union socket_address *sa,
int *proto, int *use_ssl, char *cert, char *ca) {
unsigned int a, b, c, d, port;
int n = 0, len = 0;
char host[200];
#ifdef NS_ENABLE_IPV6
char buf[100];
#endif
// MacOS needs that. If we do not zero it, subsequent bind() will fail.
// Also, all-zeroes in the socket address means binding to all addresses
// for both IPv4 and IPv6 (INADDR_ANY and IN6ADDR_ANY_INIT).
memset(sa, 0, sizeof(*sa));
sa->sin.sin_family = AF_INET;
*proto = SOCK_STREAM;
*use_ssl = 0;
cert[0] = ca[0] = '\0';
if (memcmp(str, "ssl://", 6) == 0) {
str += 6;
*use_ssl = 1;
} else if (memcmp(str, "udp://", 6) == 0) {
str += 6;
*proto = SOCK_DGRAM;
} else if (memcmp(str, "tcp://", 6) == 0) {
str += 6;
}
if (sscanf(str, "%u.%u.%u.%u:%u%n", &a, &b, &c, &d, &port, &len) == 5) {
// Bind to a specific IPv4 address, e.g. 192.168.1.5:8080
sa->sin.sin_addr.s_addr = htonl((a << 24) | (b << 16) | (c << 8) | d);
sa->sin.sin_port = htons((uint16_t) port);
#ifdef NS_ENABLE_IPV6
} else if (sscanf(str, "[%99[^]]]:%u%n", buf, &port, &len) == 2 &&
inet_pton(AF_INET6, buf, &sa->sin6.sin6_addr)) {
// IPv6 address, e.g. [3ffe:2a00:100:7031::1]:8080
sa->sin6.sin6_family = AF_INET6;
sa->sin6.sin6_port = htons((uint16_t) port);
#endif
} else if (sscanf(str, "%199[^ :]:%u%n", host, &port, &len) == 2) {
sa->sin.sin_port = htons((uint16_t) port);
ns_resolve2(host, &sa->sin.sin_addr);
} else if (sscanf(str, "%u%n", &port, &len) == 1) {
// If only port is specified, bind to IPv4, INADDR_ANY
sa->sin.sin_port = htons((uint16_t) port);
}
if (*use_ssl && (sscanf(str + len, ":%99[^:]:%99[^:]%n", cert, ca, &n) == 2 ||
sscanf(str + len, ":%99[^:]%n", cert, &n) == 1)) {
len += n;
}
return port < 0xffff && str[len] == '\0' ? len : 0;
}
// 'sa' must be an initialized address to bind to
static sock_t ns_open_listening_socket(union socket_address *sa, int proto) {
socklen_t sa_len = (sa->sa.sa_family == AF_INET) ?
sizeof(sa->sin) : sizeof(sa->sin6);
sock_t sock = INVALID_SOCKET;
#ifndef _WIN32
int on = 1;
#endif
if ((sock = socket(sa->sa.sa_family, proto, 0)) != INVALID_SOCKET &&
#ifndef _WIN32
      // SO_REUSEADDR is not enabled on Windows because the semantics of
      // SO_REUSEADDR on UNIX and Windows are different. On Windows,
      // SO_REUSEADDR allows binding a socket to a port without error even if
// the port is already open by another program. This is not the behavior
// SO_REUSEADDR was designed for, and leads to hard-to-track failure
// scenarios. Therefore, SO_REUSEADDR was disabled on Windows.
!setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void *) &on, sizeof(on)) &&
#endif
!bind(sock, &sa->sa, sa_len) &&
(proto == SOCK_DGRAM || listen(sock, SOMAXCONN) == 0)) {
ns_set_non_blocking_mode(sock);
// In case port was set to 0, get the real port number
(void) getsockname(sock, &sa->sa, &sa_len);
} else if (sock != INVALID_SOCKET) {
closesocket(sock);
sock = INVALID_SOCKET;
}
return sock;
}
#ifdef NS_ENABLE_SSL
// Certificate generation script is at
// https://github.com/cesanta/net_skeleton/blob/master/scripts/gen_certs.sh
static int ns_use_ca_cert(SSL_CTX *ctx, const char *cert) {
if (ctx == NULL) {
return -1;
} else if (cert == NULL || cert[0] == '\0') {
return 0;
}
SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, 0);
return SSL_CTX_load_verify_locations(ctx, cert, NULL) == 1 ? 0 : -2;
}
static int ns_use_cert(SSL_CTX *ctx, const char *pem_file) {
if (ctx == NULL) {
return -1;
} else if (pem_file == NULL || pem_file[0] == '\0') {
return 0;
} else if (SSL_CTX_use_certificate_file(ctx, pem_file, 1) == 0 ||
SSL_CTX_use_PrivateKey_file(ctx, pem_file, 1) == 0) {
return -2;
} else {
SSL_CTX_set_mode(ctx, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
SSL_CTX_use_certificate_chain_file(ctx, pem_file);
return 0;
}
}
#endif // NS_ENABLE_SSL
struct ns_connection *ns_bind(struct ns_mgr *srv, const char *str,
ns_callback_t callback, void *user_data) {
union socket_address sa;
struct ns_connection *nc = NULL;
int use_ssl, proto;
char cert[100], ca_cert[100];
sock_t sock;
ns_parse_address(str, &sa, &proto, &use_ssl, cert, ca_cert);
if (use_ssl && cert[0] == '\0') return NULL;
if ((sock = ns_open_listening_socket(&sa, proto)) == INVALID_SOCKET) {
} else if ((nc = ns_add_sock(srv, sock, callback, NULL)) == NULL) {
closesocket(sock);
} else {
nc->sa = sa;
nc->flags |= NSF_LISTENING;
nc->user_data = user_data;
nc->callback = callback;
if (proto == SOCK_DGRAM) {
nc->flags |= NSF_UDP;
}
#ifdef NS_ENABLE_SSL
if (use_ssl) {
nc->ssl_ctx = SSL_CTX_new(SSLv23_server_method());
if (ns_use_cert(nc->ssl_ctx, cert) != 0 ||
ns_use_ca_cert(nc->ssl_ctx, ca_cert) != 0) {
ns_close_conn(nc);
nc = NULL;
}
}
#endif
DBG(("%p sock %d/%d ssl %p %p", nc, sock, proto, nc->ssl_ctx, nc->ssl));
}
return nc;
}
static struct ns_connection *accept_conn(struct ns_connection *ls) {
struct ns_connection *c = NULL;
union socket_address sa;
socklen_t len = sizeof(sa);
sock_t sock = INVALID_SOCKET;
// NOTE(lsm): on Windows, sock is always > FD_SETSIZE
if ((sock = accept(ls->sock, &sa.sa, &len)) == INVALID_SOCKET) {
} else if ((c = ns_add_sock(ls->mgr, sock, ls->callback,
ls->user_data)) == NULL) {
closesocket(sock);
#ifdef NS_ENABLE_SSL
} else if (ls->ssl_ctx != NULL &&
((c->ssl = SSL_new(ls->ssl_ctx)) == NULL ||
SSL_set_fd(c->ssl, sock) != 1)) {
DBG(("SSL error"));
ns_close_conn(c);
c = NULL;
#endif
} else {
c->listener = ls;
c->proto_data = ls->proto_data;
ns_call(c, NS_ACCEPT, &sa);
DBG(("%p %d %p %p", c, c->sock, c->ssl_ctx, c->ssl));
}
return c;
}
static int ns_is_error(int n) {
return n == 0 ||
(n < 0 && errno != EINTR && errno != EINPROGRESS &&
errno != EAGAIN && errno != EWOULDBLOCK
#ifdef _WIN32
&& WSAGetLastError() != WSAEINTR && WSAGetLastError() != WSAEWOULDBLOCK
#endif
);
}
void ns_sock_to_str(sock_t sock, char *buf, size_t len, int flags) {
union socket_address sa;
socklen_t slen = sizeof(sa);
if (buf != NULL && len > 0) {
buf[0] = '\0';
memset(&sa, 0, sizeof(sa));
if (flags & 4) {
getpeername(sock, &sa.sa, &slen);
} else {
getsockname(sock, &sa.sa, &slen);
}
if (flags & 1) {
#if defined(NS_ENABLE_IPV6)
inet_ntop(sa.sa.sa_family, sa.sa.sa_family == AF_INET ?
(void *) &sa.sin.sin_addr :
(void *) &sa.sin6.sin6_addr, buf, len);
#elif defined(_WIN32)
// Only Windoze Vista (and newer) have inet_ntop()
strncpy(buf, inet_ntoa(sa.sin.sin_addr), len);
#else
inet_ntop(sa.sa.sa_family, (void *) &sa.sin.sin_addr, buf,(socklen_t)len);
#endif
}
if (flags & 2) {
snprintf(buf + strlen(buf), len - (strlen(buf) + 1), "%s%d",
flags & 1 ? ":" : "", (int) ntohs(sa.sin.sin_port));
}
}
}
int ns_hexdump(const void *buf, int len, char *dst, int dst_len) {
const unsigned char *p = (const unsigned char *) buf;
char ascii[17] = "";
int i, idx, n = 0;
for (i = 0; i < len; i++) {
idx = i % 16;
if (idx == 0) {
if (i > 0) n += snprintf(dst + n, dst_len - n, " %s\n", ascii);
n += snprintf(dst + n, dst_len - n, "%04x ", i);
}
n += snprintf(dst + n, dst_len - n, " %02x", p[i]);
ascii[idx] = p[i] < 0x20 || p[i] > 0x7e ? '.' : p[i];
ascii[idx + 1] = '\0';
}
while (i++ % 16) n += snprintf(dst + n, dst_len - n, "%s", " ");
n += snprintf(dst + n, dst_len - n, " %s\n\n", ascii);
return n;
}
#ifdef NS_ENABLE_SSL
static int ns_ssl_err(struct ns_connection *conn, int res) {
int ssl_err = SSL_get_error(conn->ssl, res);
if (ssl_err == SSL_ERROR_WANT_READ) conn->flags |= NSF_WANT_READ;
if (ssl_err == SSL_ERROR_WANT_WRITE) conn->flags |= NSF_WANT_WRITE;
return ssl_err;
}
#endif
static void ns_read_from_socket(struct ns_connection *conn) {
char buf[NS_READ_BUFFER_SIZE];
int n = 0;
if (conn->flags & NSF_CONNECTING) {
int ok = 1, ret;
socklen_t len = sizeof(ok);
ret = getsockopt(conn->sock, SOL_SOCKET, SO_ERROR, (char *) &ok, &len);
(void) ret;
#ifdef NS_ENABLE_SSL
if (ret == 0 && ok == 0 && conn->ssl != NULL) {
int res = SSL_connect(conn->ssl);
int ssl_err = ns_ssl_err(conn, res);
if (res == 1) {
conn->flags |= NSF_SSL_HANDSHAKE_DONE;
} else if (ssl_err == SSL_ERROR_WANT_READ ||
ssl_err == SSL_ERROR_WANT_WRITE) {
return; // Call us again
} else {
ok = 1;
}
}
#endif
conn->flags &= ~NSF_CONNECTING;
DBG(("%p ok=%d", conn, ok));
if (ok != 0) {
conn->flags |= NSF_CLOSE_IMMEDIATELY;
}
ns_call(conn, NS_CONNECT, &ok);
return;
}
#ifdef NS_ENABLE_SSL
if (conn->ssl != NULL) {
if (conn->flags & NSF_SSL_HANDSHAKE_DONE) {
      // SSL library may have more bytes ready to read than we ask to read.
// Therefore, read in a loop until we read everything. Without the loop,
// we skip to the next select() cycle which can just timeout.
while ((n = SSL_read(conn->ssl, buf, sizeof(buf))) > 0) {
DBG(("%p %d <- %d bytes (SSL)", conn, conn->flags, n));
iobuf_append(&conn->recv_iobuf, buf, n);
ns_call(conn, NS_RECV, &n);
}
ns_ssl_err(conn, n);
} else {
int res = SSL_accept(conn->ssl);
int ssl_err = ns_ssl_err(conn, res);
if (res == 1) {
conn->flags |= NSF_SSL_HANDSHAKE_DONE;
} else if (ssl_err == SSL_ERROR_WANT_READ ||
ssl_err == SSL_ERROR_WANT_WRITE) {
return; // Call us again
} else {
conn->flags |= NSF_CLOSE_IMMEDIATELY;
}
return;
}
} else
#endif
{
while ((n = (int) recv(conn->sock, buf, sizeof(buf), 0)) > 0) {
DBG(("%p %d <- %d bytes (PLAIN)", conn, conn->flags, n));
iobuf_append(&conn->recv_iobuf, buf, n);
ns_call(conn, NS_RECV, &n);
}
}
if (ns_is_error(n)) {
conn->flags |= NSF_CLOSE_IMMEDIATELY;
}
}
static void ns_write_to_socket(struct ns_connection *conn) {
struct iobuf *io = &conn->send_iobuf;
int n = 0;
#ifdef NS_ENABLE_SSL
if (conn->ssl != NULL) {
n = SSL_write(conn->ssl, io->buf, io->len);
if (n <= 0) {
int ssl_err = ns_ssl_err(conn, n);
if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
return; // Call us again
} else {
conn->flags |= NSF_CLOSE_IMMEDIATELY;
}
}
} else
#endif
{ n = (int) send(conn->sock, io->buf, io->len, 0); }
DBG(("%p %d -> %d bytes", conn, conn->flags, n));
ns_call(conn, NS_SEND, &n);
if (ns_is_error(n)) {
conn->flags |= NSF_CLOSE_IMMEDIATELY;
} else if (n > 0) {
iobuf_remove(io, n);
}
}
int ns_send(struct ns_connection *conn, const void *buf, int len) {
return (int) ns_out(conn, buf, len);
}
static void ns_handle_udp(struct ns_connection *ls) {
struct ns_connection nc;
char buf[NS_UDP_RECEIVE_BUFFER_SIZE];
int n;
socklen_t s_len = sizeof(nc.sa);
memset(&nc, 0, sizeof(nc));
n = recvfrom(ls->sock, buf, sizeof(buf), 0, &nc.sa.sa, &s_len);
if (n <= 0) {
DBG(("%p recvfrom: %s", ls, strerror(errno)));
} else {
nc.mgr = ls->mgr;
nc.recv_iobuf.buf = buf;
nc.recv_iobuf.len = nc.recv_iobuf.size = n;
nc.sock = ls->sock;
nc.callback = ls->callback;
nc.user_data = ls->user_data;
nc.proto_data = ls->proto_data;
nc.mgr = ls->mgr;
nc.listener = ls;
nc.flags = NSF_UDP;
DBG(("%p %d bytes received", ls, n));
ns_call(&nc, NS_RECV, &n);
}
}
static void ns_add_to_set(sock_t sock, fd_set *set, sock_t *max_fd) {
if (sock != INVALID_SOCKET) {
FD_SET(sock, set);
if (*max_fd == INVALID_SOCKET || sock > *max_fd) {
*max_fd = sock;
}
}
}
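// One iteration of the event loop: build the fd sets, select() for at most
// `milli` ms, dispatch accept/read/write events, then reap connections
// flagged for closing.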
time_t ns_mgr_poll(struct ns_mgr *mgr, int milli) {
struct ns_connection *conn, *tmp_conn;
struct timeval tv;
fd_set read_set, write_set;
sock_t max_fd = INVALID_SOCKET;
time_t current_time = time(NULL);
FD_ZERO(&read_set);
FD_ZERO(&write_set);
ns_add_to_set(mgr->ctl[1], &read_set, &max_fd);
for (conn = mgr->active_connections; conn != NULL; conn = tmp_conn) {
tmp_conn = conn->next;
if (!(conn->flags & (NSF_LISTENING | NSF_CONNECTING))) {
ns_call(conn, NS_POLL, ¤t_time);
}
if (conn->flags & NSF_CLOSE_IMMEDIATELY) {
ns_close_conn(conn);
} else {
if (!(conn->flags & NSF_WANT_WRITE)) {
//DBG(("%p read_set", conn));
ns_add_to_set(conn->sock, &read_set, &max_fd);
}
if (((conn->flags & NSF_CONNECTING) && !(conn->flags & NSF_WANT_READ)) ||
(conn->send_iobuf.len > 0 && !(conn->flags & NSF_CONNECTING) &&
!(conn->flags & NSF_BUFFER_BUT_DONT_SEND))) {
//DBG(("%p write_set", conn));
ns_add_to_set(conn->sock, &write_set, &max_fd);
}
}
}
tv.tv_sec = milli / 1000;
tv.tv_usec = (milli % 1000) * 1000;
if (select((int) max_fd + 1, &read_set, &write_set, NULL, &tv) > 0) {
// select() might have been waiting for a long time, reset current_time
// now to prevent last_io_time being set to the past.
current_time = time(NULL);
// Read wakeup messages
if (mgr->ctl[1] != INVALID_SOCKET &&
FD_ISSET(mgr->ctl[1], &read_set)) {
struct ctl_msg ctl_msg;
int len = (int) recv(mgr->ctl[1], (char *) &ctl_msg, sizeof(ctl_msg), 0);
send(mgr->ctl[1], ctl_msg.message, 1, 0);
if (len >= (int) sizeof(ctl_msg.callback) && ctl_msg.callback != NULL) {
struct ns_connection *c;
for (c = ns_next(mgr, NULL); c != NULL; c = ns_next(mgr, c)) {
ctl_msg.callback(c, NS_POLL, ctl_msg.message);
}
}
}
for (conn = mgr->active_connections; conn != NULL; conn = tmp_conn) {
tmp_conn = conn->next;
if (FD_ISSET(conn->sock, &read_set)) {
if (conn->flags & NSF_LISTENING) {
if (conn->flags & NSF_UDP) {
ns_handle_udp(conn);
} else {
// We're not looping here, and accepting just one connection at
// a time. The reason is that eCos does not respect non-blocking
// flag on a listening socket and hangs in a loop.
accept_conn(conn);
}
} else {
conn->last_io_time = current_time;
ns_read_from_socket(conn);
}
}
if (FD_ISSET(conn->sock, &write_set)) {
if (conn->flags & NSF_CONNECTING) {
ns_read_from_socket(conn);
} else if (!(conn->flags & NSF_BUFFER_BUT_DONT_SEND)) {
conn->last_io_time = current_time;
ns_write_to_socket(conn);
}
}
}
}
for (conn = mgr->active_connections; conn != NULL; conn = tmp_conn) {
tmp_conn = conn->next;
if ((conn->flags & NSF_CLOSE_IMMEDIATELY) ||
(conn->send_iobuf.len == 0 &&
(conn->flags & NSF_FINISHED_SENDING_DATA))) {
ns_close_conn(conn);
}
}
return current_time;
}
struct ns_connection *ns_connect(struct ns_mgr *mgr, const char *address,
ns_callback_t callback, void *user_data) {
sock_t sock = INVALID_SOCKET;
struct ns_connection *nc = NULL;
union socket_address sa;
char cert[100], ca_cert[100];
int rc, use_ssl, proto;
ns_parse_address(address, &sa, &proto, &use_ssl, cert, ca_cert);
if ((sock = socket(AF_INET, proto, 0)) == INVALID_SOCKET) {
return NULL;
}
ns_set_non_blocking_mode(sock);
rc = (proto == SOCK_DGRAM) ? 0 : connect(sock, &sa.sa, sizeof(sa.sin));
if (rc != 0 && ns_is_error(rc)) {
closesocket(sock);
return NULL;
} else if ((nc = ns_add_sock(mgr, sock, callback, user_data)) == NULL) {
closesocket(sock);
return NULL;
}
nc->sa = sa; // Important, cause UDP conns will use sendto()
nc->flags = (proto == SOCK_DGRAM) ? NSF_UDP : NSF_CONNECTING;
#ifdef NS_ENABLE_SSL
if (use_ssl) {
if ((nc->ssl_ctx = SSL_CTX_new(SSLv23_client_method())) == NULL ||
ns_use_cert(nc->ssl_ctx, cert) != 0 ||
ns_use_ca_cert(nc->ssl_ctx, ca_cert) != 0 ||
(nc->ssl = SSL_new(nc->ssl_ctx)) == NULL) {
ns_close_conn(nc);
return NULL;
} else {
SSL_set_fd(nc->ssl, sock);
}
}
#endif
return nc;
}
struct ns_connection *ns_add_sock(struct ns_mgr *s, sock_t sock,
ns_callback_t callback, void *user_data) {
struct ns_connection *conn;
if ((conn = (struct ns_connection *) NS_MALLOC(sizeof(*conn))) != NULL) {
memset(conn, 0, sizeof(*conn));
ns_set_non_blocking_mode(sock);
ns_set_close_on_exec(sock);
conn->sock = sock;
conn->user_data = user_data;
conn->callback = callback;
conn->mgr = s;
conn->last_io_time = time(NULL);
ns_add_conn(s, conn);
DBG(("%p %d", conn, sock));
}
return conn;
}
struct ns_connection *ns_next(struct ns_mgr *s, struct ns_connection *conn) {
return conn == NULL ? s->active_connections : conn->next;
}
void ns_broadcast(struct ns_mgr *mgr, ns_callback_t cb,void *data, size_t len) {
struct ctl_msg ctl_msg;
if (mgr->ctl[0] != INVALID_SOCKET && data != NULL &&
len < sizeof(ctl_msg.message)) {
ctl_msg.callback = cb;
memcpy(ctl_msg.message, data, len);
send(mgr->ctl[0], (char *) &ctl_msg,
offsetof(struct ctl_msg, message) + len, 0);
recv(mgr->ctl[0], (char *) &len, 1, 0);
}
}
void ns_mgr_init(struct ns_mgr *s, void *user_data) {
memset(s, 0, sizeof(*s));
s->ctl[0] = s->ctl[1] = INVALID_SOCKET;
s->user_data = user_data;
#ifdef _WIN32
{ WSADATA data; WSAStartup(MAKEWORD(2, 2), &data); }
#else
// Ignore SIGPIPE signal, so if client cancels the request, it
// won't kill the whole process.
signal(SIGPIPE, SIG_IGN);
#endif
#ifndef NS_DISABLE_SOCKETPAIR
do {
ns_socketpair2(s->ctl, SOCK_DGRAM);
} while (s->ctl[0] == INVALID_SOCKET);
#endif
#ifdef NS_ENABLE_SSL
{static int init_done; if (!init_done) { SSL_library_init(); init_done++; }}
#endif
}
void ns_mgr_free(struct ns_mgr *s) {
struct ns_connection *conn, *tmp_conn;
DBG(("%p", s));
if (s == NULL) return;
// Do one last poll, see https://github.com/cesanta/mongoose/issues/286
ns_mgr_poll(s, 0);
if (s->ctl[0] != INVALID_SOCKET) closesocket(s->ctl[0]);
if (s->ctl[1] != INVALID_SOCKET) closesocket(s->ctl[1]);
s->ctl[0] = s->ctl[1] = INVALID_SOCKET;
for (conn = s->active_connections; conn != NULL; conn = tmp_conn) {
tmp_conn = conn->next;
ns_close_conn(conn);
}
}
// net_skeleton end
#endif // NOEMBED_NET_SKELETON
#include <ctype.h>
#ifdef _WIN32 //////////////// Windows specific defines and includes
#include <io.h> // For _lseeki64
#include <direct.h> // For _mkdir
#ifndef S_ISDIR
#define S_ISDIR(x) ((x) & _S_IFDIR)
#endif
#ifdef stat
#undef stat
#endif
#ifdef lseek
#undef lseek
#endif
#ifdef popen
#undef popen
#endif
#ifdef pclose
#undef pclose
#endif
#define stat(x, y) mg_stat((x), (y))
#define fopen(x, y) mg_fopen((x), (y))
#define open(x, y, z) mg_open((x), (y), (z))
#define close(x) _close(x)
#define fileno(x) _fileno(x)
#define lseek(x, y, z) _lseeki64((x), (y), (z))
#define read(x, y, z) _read((x), (y), (z))
#define write(x, y, z) _write((x), (y), (z))
#define popen(x, y) _popen((x), (y))
#define pclose(x) _pclose(x)
#define mkdir(x, y) _mkdir(x)
#define rmdir(x) _rmdir(x)
#define strdup(x) _strdup(x)
#ifndef __func__
#define STRX(x) #x
#define STR(x) STRX(x)
#define __func__ __FILE__ ":" STR(__LINE__)
#endif
/* MinGW has adopted the MSVC formatting for 64-bit ints as of gcc 4.4 until 4.8 */
#if (defined(__MINGW32__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4 && __GNUC_MINOR__ < 8))) || defined(_MSC_VER)
#define INT64_FMT "I64d"
#else
#define INT64_FMT "lld"
#endif
#define flockfile(x) ((void) (x))
#define funlockfile(x) ((void) (x))
typedef struct _stati64 file_stat_t;
typedef HANDLE process_id_t;
#else ////////////// UNIX specific defines and includes
#if !defined(MONGOOSE_NO_FILESYSTEM) &&\
(!defined(MONGOOSE_NO_DAV) || !defined(MONGOOSE_NO_DIRECTORY_LISTING))
#include <dirent.h>
#endif
#if !defined(MONGOOSE_NO_FILESYSTEM) && !defined(MONGOOSE_NO_DL)
#include <dlfcn.h>
#endif
#include <inttypes.h>
#include <pwd.h>
#if !defined(O_BINARY)
#define O_BINARY 0
#endif
#define INT64_FMT PRId64
typedef struct stat file_stat_t;
typedef pid_t process_id_t;
#endif //////// End of platform-specific defines and includes
#include "mongoose.h"
#define MAX_REQUEST_SIZE 16384
#define IOBUF_SIZE 8192
#define MAX_PATH_SIZE 8192
#define DEFAULT_CGI_PATTERN "**.cgi$|**.pl$|**.php$"
#define CGI_ENVIRONMENT_SIZE 8192
#define MAX_CGI_ENVIR_VARS 64
#define ENV_EXPORT_TO_CGI "MONGOOSE_CGI"
#define PASSWORDS_FILE_NAME ".htpasswd"
#ifndef MONGOOSE_USE_WEBSOCKET_PING_INTERVAL
#define MONGOOSE_USE_WEBSOCKET_PING_INTERVAL 5
#endif
// Extra HTTP headers to send in every static file reply
#if !defined(MONGOOSE_USE_EXTRA_HTTP_HEADERS)
#define MONGOOSE_USE_EXTRA_HTTP_HEADERS ""
#endif
#ifndef MONGOOSE_POST_SIZE_LIMIT
#define MONGOOSE_POST_SIZE_LIMIT 0
#endif
#ifndef MONGOOSE_IDLE_TIMEOUT_SECONDS
#define MONGOOSE_IDLE_TIMEOUT_SECONDS 300
#endif
#if defined(NS_DISABLE_SOCKETPAIR) && !defined(MONGOOSE_NO_CGI)
#define MONGOOSE_NO_CGI
#endif
#ifdef MONGOOSE_NO_FILESYSTEM
#define MONGOOSE_NO_AUTH
#if !defined(MONGOOSE_NO_CGI)
#define MONGOOSE_NO_CGI
#endif
#define MONGOOSE_NO_DAV
#define MONGOOSE_NO_DIRECTORY_LISTING
#define MONGOOSE_NO_LOGGING
#define MONGOOSE_NO_SSI
#define MONGOOSE_NO_DL
#endif
struct vec {
const char *ptr;
int len;
};
// For directory listing and WebDAV support
struct dir_entry {
struct connection *conn;
char *file_name;
file_stat_t st;
};
// NOTE(lsm): this enum should be kept in sync with static_config_options.
enum {
ACCESS_CONTROL_LIST,
#ifndef MONGOOSE_NO_FILESYSTEM
ACCESS_LOG_FILE,
#ifndef MONGOOSE_NO_AUTH
AUTH_DOMAIN,
#endif
#ifndef MONGOOSE_NO_CGI
CGI_INTERPRETER,
CGI_PATTERN,
#endif
DAV_AUTH_FILE,
DOCUMENT_ROOT,
#ifndef MONGOOSE_NO_DIRECTORY_LISTING
ENABLE_DIRECTORY_LISTING,
#endif
#endif
ENABLE_PROXY,
EXTRA_MIME_TYPES,
#if !defined(MONGOOSE_NO_FILESYSTEM) && !defined(MONGOOSE_NO_AUTH)
GLOBAL_AUTH_FILE,
#endif
#ifndef MONGOOSE_NO_FILESYSTEM
HIDE_FILES_PATTERN,
HEXDUMP_FILE,
INDEX_FILES,
#endif
LISTENING_PORT,
#ifndef _WIN32
RUN_AS_USER,
#endif
#ifndef MONGOOSE_NO_SSI
SSI_PATTERN,
#endif
URL_REWRITES,
NUM_OPTIONS
};
static const char *static_config_options[] = {
"access_control_list", NULL,
#ifndef MONGOOSE_NO_FILESYSTEM
"access_log_file", NULL,
#ifndef MONGOOSE_NO_AUTH
"auth_domain", "mydomain.com",
#endif
#ifndef MONGOOSE_NO_CGI
"cgi_interpreter", NULL,
"cgi_pattern", DEFAULT_CGI_PATTERN,
#endif
"dav_auth_file", NULL,
"document_root", NULL,
#ifndef MONGOOSE_NO_DIRECTORY_LISTING
"enable_directory_listing", "yes",
#endif
#endif
"enable_proxy", NULL,
"extra_mime_types", NULL,
#if !defined(MONGOOSE_NO_FILESYSTEM) && !defined(MONGOOSE_NO_AUTH)
"global_auth_file", NULL,
#endif
#ifndef MONGOOSE_NO_FILESYSTEM
"hide_files_patterns", NULL,
"hexdump_file", NULL,
"index_files","index.html,index.htm,index.shtml,index.cgi,index.php",
#endif
"listening_port", NULL,
#ifndef _WIN32
"run_as_user", NULL,
#endif
#ifndef MONGOOSE_NO_SSI
"ssi_pattern", "**.shtml$|**.shtm$",
#endif
"url_rewrites", NULL,
NULL
};
struct mg_server {
struct ns_mgr ns_mgr;
union socket_address lsa; // Listening socket address
mg_handler_t event_handler;
char *config_options[NUM_OPTIONS];
};
// Local endpoint representation
union endpoint {
int fd; // Opened regular local file
struct ns_connection *nc; // CGI or proxy->target connection
};
enum endpoint_type {
EP_NONE, EP_FILE, EP_CGI, EP_USER, EP_PUT, EP_CLIENT, EP_PROXY
};
#define MG_HEADERS_SENT NSF_USER_1
#define MG_LONG_RUNNING NSF_USER_2
#define MG_CGI_CONN NSF_USER_3
#define MG_PROXY_CONN NSF_USER_4
#define MG_PROXY_DONT_PARSE NSF_USER_5
struct connection {
struct ns_connection *ns_conn; // NOTE(lsm): main.c depends on this order
struct mg_connection mg_conn;
struct mg_server *server;
union endpoint endpoint;
enum endpoint_type endpoint_type;
char *path_info;
char *request;
int64_t num_bytes_recv; // Total number of bytes received
int64_t cl; // Reply content length, for Range support
int request_len; // Request length, including last \r\n after last header
};
#define MG_CONN_2_CONN(c) ((struct connection *) ((char *) (c) - \
offsetof(struct connection, mg_conn)))
static void open_local_endpoint(struct connection *conn, int skip_user);
static void close_local_endpoint(struct connection *conn);
static void mg_ev_handler(struct ns_connection *nc, int ev, void *p);
static const struct {
const char *extension;
size_t ext_len;
const char *mime_type;
} static_builtin_mime_types[] = {
{".html", 5, "text/html"},
{".htm", 4, "text/html"},
{".shtm", 5, "text/html"},
{".shtml", 6, "text/html"},
{".css", 4, "text/css"},
{".js", 3, "application/javascript"},
{".ico", 4, "image/x-icon"},
{".gif", 4, "image/gif"},
{".jpg", 4, "image/jpeg"},
{".jpeg", 5, "image/jpeg"},
{".png", 4, "image/png"},
{".svg", 4, "image/svg+xml"},
{".txt", 4, "text/plain"},
{".torrent", 8, "application/x-bittorrent"},
{".wav", 4, "audio/x-wav"},
{".mp3", 4, "audio/x-mp3"},
{".mid", 4, "audio/mid"},
{".m3u", 4, "audio/x-mpegurl"},
{".ogg", 4, "application/ogg"},
{".ram", 4, "audio/x-pn-realaudio"},
{".xml", 4, "text/xml"},
{".json", 5, "application/json"},
{".xslt", 5, "application/xml"},
{".xsl", 4, "application/xml"},
{".ra", 3, "audio/x-pn-realaudio"},
{".doc", 4, "application/msword"},
{".exe", 4, "application/octet-stream"},
{".zip", 4, "application/x-zip-compressed"},
{".xls", 4, "application/excel"},
{".tgz", 4, "application/x-tar-gz"},
{".tar", 4, "application/x-tar"},
{".gz", 3, "application/x-gunzip"},
{".arj", 4, "application/x-arj-compressed"},
{".rar", 4, "application/x-rar-compressed"},
{".rtf", 4, "application/rtf"},
{".pdf", 4, "application/pdf"},
{".swf", 4, "application/x-shockwave-flash"},
{".mpg", 4, "video/mpeg"},
{".webm", 5, "video/webm"},
{".mpeg", 5, "video/mpeg"},
{".mov", 4, "video/quicktime"},
{".mp4", 4, "video/mp4"},
{".m4v", 4, "video/x-m4v"},
{".asf", 4, "video/x-ms-asf"},
{".avi", 4, "video/x-msvideo"},
{".bmp", 4, "image/bmp"},
{".ttf", 4, "application/x-font-ttf"},
{NULL, 0, NULL}
};
#ifndef MONGOOSE_NO_THREADS
void *mg_start_thread(void *(*f)(void *), void *p) {
return ns_start_thread(f, p);
}
#endif // MONGOOSE_NO_THREADS
#ifndef MONGOOSE_NO_MMAP
#ifdef _WIN32
static void *mmap(void *addr, int64_t len, int prot, int flags, int fd,
int offset) {
HANDLE fh = (HANDLE) _get_osfhandle(fd);
HANDLE mh = CreateFileMapping(fh, 0, PAGE_READONLY, 0, 0, 0);
void *p = MapViewOfFile(mh, FILE_MAP_READ, 0, 0, (size_t) len);
CloseHandle(mh);
return p;
}
#define munmap(x, y) UnmapViewOfFile(x)
#define MAP_FAILED NULL
#define MAP_PRIVATE 0
#define PROT_READ 0
#else
#include <sys/mman.h>
#endif
void *mg_mmap(FILE *fp, size_t size) {
void *p = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fileno(fp), 0);
return p == MAP_FAILED ? NULL : p;
}
void mg_munmap(void *p, size_t size) {
munmap(p, size);
}
#endif // MONGOOSE_NO_MMAP
#if defined(_WIN32) && !defined(MONGOOSE_NO_FILESYSTEM)
// Encode 'path', which is assumed to be a UTF-8 string, into a UNICODE
// (wide-character) string. 'wbuf' and 'wbuf_len' are the target buffer
// and its length.
static void to_wchar(const char *path, wchar_t *wbuf, size_t wbuf_len) {
char buf[MAX_PATH_SIZE * 2], buf2[MAX_PATH_SIZE * 2], *p;
strncpy(buf, path, sizeof(buf));
buf[sizeof(buf) - 1] = '\0';
// Trim trailing slashes. Leave backslash for paths like "X:\"
p = buf + strlen(buf) - 1;
while (p > buf && p[-1] != ':' && (p[0] == '\\' || p[0] == '/')) *p-- = '\0';
  // Convert to Unicode and back. If the doubly-converted string does not
  // match the original, something is fishy; reject it.
memset(wbuf, 0, wbuf_len * sizeof(wchar_t));
MultiByteToWideChar(CP_UTF8, 0, buf, -1, wbuf, (int) wbuf_len);
WideCharToMultiByte(CP_UTF8, 0, wbuf, (int) wbuf_len, buf2, sizeof(buf2),
NULL, NULL);
if (strcmp(buf, buf2) != 0) {
wbuf[0] = L'\0';
}
}
static int mg_stat(const char *path, file_stat_t *st) {
wchar_t wpath[MAX_PATH_SIZE];
to_wchar(path, wpath, ARRAY_SIZE(wpath));
DBG(("[%ls] -> %d", wpath, _wstati64(wpath, st)));
return _wstati64(wpath, st);
}
static FILE *mg_fopen(const char *path, const char *mode) {
wchar_t wpath[MAX_PATH_SIZE], wmode[10];
to_wchar(path, wpath, ARRAY_SIZE(wpath));
to_wchar(mode, wmode, ARRAY_SIZE(wmode));
return _wfopen(wpath, wmode);
}
static int mg_open(const char *path, int flag, int mode) {
wchar_t wpath[MAX_PATH_SIZE];
to_wchar(path, wpath, ARRAY_SIZE(wpath));
return _wopen(wpath, flag, mode);
}
#endif // _WIN32 && !MONGOOSE_NO_FILESYSTEM
// A helper function for traversing a comma-separated list of values.
// It returns a list pointer shifted to the next value, or NULL if the end
// of the list is reached.
// The value is stored in the 'val' vector. If the value has the form "x=y",
// then the 'eq_val' vector is initialized to point to the "y" part, and the
// 'val' vector length is adjusted to cover only the "x" part.
static const char *next_option(const char *list, struct vec *val,
struct vec *eq_val) {
if (list == NULL || *list == '\0') {
// End of the list
list = NULL;
} else {
val->ptr = list;
if ((list = strchr(val->ptr, ',')) != NULL) {
// Comma found. Store length and shift the list ptr
val->len = list - val->ptr;
list++;
} else {
// This value is the last one
list = val->ptr + strlen(val->ptr);
val->len = list - val->ptr;
}
if (eq_val != NULL) {
// Value has form "x=y", adjust pointers and lengths
// so that val points to "x", and eq_val points to "y".
eq_val->len = 0;
eq_val->ptr = (const char *) memchr(val->ptr, '=', val->len);
if (eq_val->ptr != NULL) {
eq_val->ptr++; // Skip over '=' character
eq_val->len = val->ptr + val->len - eq_val->ptr;
val->len = (eq_val->ptr - val->ptr) - 1;
}
}
}
return list;
}
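// A minimal usage sketch for next_option() (illustrative only, not part of
// the library): iterate over a comma-separated "key=value" list. Note that
// eq_val->ptr stays NULL for entries without an '=' sign.
#if 0
static void print_options(const char *list) {
  struct vec key, value;
  while ((list = next_option(list, &key, &value)) != NULL) {
    printf("key=[%.*s] value=[%.*s]\n", key.len, key.ptr,
           value.len, value.ptr == NULL ? "" : value.ptr);
  }
}
// print_options("a=1,b=2,c") prints a/1, b/2, and c with an empty value.
#endif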
// Like snprintf(), but never returns a negative value, nor a value larger
// than the supplied buffer size. Always NUL-terminates the buffer.
static int mg_vsnprintf(char *buf, size_t buflen, const char *fmt, va_list ap) {
int n;
if (buflen < 1) return 0;
n = vsnprintf(buf, buflen, fmt, ap);
if (n < 0) {
n = 0;
} else if (n >= (int) buflen) {
n = (int) buflen - 1;
}
buf[n] = '\0';
return n;
}
static int mg_snprintf(char *buf, size_t buflen, const char *fmt, ...) {
va_list ap;
int n;
va_start(ap, fmt);
n = mg_vsnprintf(buf, buflen, fmt, ap);
va_end(ap);
return n;
}
// Check whether full request is buffered. Return:
// -1 if request is malformed
// 0 if request is not yet fully buffered
// >0 actual request length, including last \r\n\r\n
static int get_request_len(const char *s, int buf_len) {
const unsigned char *buf = (unsigned char *) s;
int i;
for (i = 0; i < buf_len; i++) {
    // Control characters are not allowed, but bytes >= 128 are.
    // Abort the scan as soon as one malformed character is found.
if (!isprint(buf[i]) && buf[i] != '\r' && buf[i] != '\n' && buf[i] < 128) {
return -1;
} else if (buf[i] == '\n' && i + 1 < buf_len && buf[i + 1] == '\n') {
return i + 2;
} else if (buf[i] == '\n' && i + 2 < buf_len && buf[i + 1] == '\r' &&
buf[i + 2] == '\n') {
return i + 3;
}
}
return 0;
}
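// For example (illustrative): get_request_len("GET / HTTP/1.0\r\n\r\n", 18)
// returns 18 (the full request is buffered), get_request_len("GET /", 5)
// returns 0 (incomplete), and a buffer containing a '\0' byte yields -1.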
// Skip characters until one of the delimiter characters is found.
// 0-terminate the resulting word, skip any remaining delimiters,
// and advance the buffer pointer to the next word. Return the found
// 0-terminated word.
static char *skip(char **buf, const char *delimiters) {
char *p, *begin_word, *end_word, *end_delimiters;
begin_word = *buf;
end_word = begin_word + strcspn(begin_word, delimiters);
end_delimiters = end_word + strspn(end_word, delimiters);
for (p = end_word; p < end_delimiters; p++) {
*p = '\0';
}
*buf = end_delimiters;
return begin_word;
}
// Parse HTTP headers from the given buffer, advance buffer to the point
// where parsing stopped.
static void parse_http_headers(char **buf, struct mg_connection *ri) {
size_t i;
for (i = 0; i < ARRAY_SIZE(ri->http_headers); i++) {
ri->http_headers[i].name = skip(buf, ": ");
ri->http_headers[i].value = skip(buf, "\r\n");
if (ri->http_headers[i].name[0] == '\0')
break;
ri->num_headers = i + 1;
}
}
static const char *status_code_to_str(int status_code) {
switch (status_code) {
case 100: return "Continue";
case 101: return "Switching Protocols";
case 102: return "Processing";
case 200: return "OK";
case 201: return "Created";
case 202: return "Accepted";
case 203: return "Non-Authoritative Information";
case 204: return "No Content";
case 205: return "Reset Content";
case 206: return "Partial Content";
case 207: return "Multi-Status";
case 208: return "Already Reported";
case 226: return "IM Used";
case 300: return "Multiple Choices";
case 301: return "Moved Permanently";
case 302: return "Found";
case 303: return "See Other";
case 304: return "Not Modified";
case 305: return "Use Proxy";
case 306: return "Switch Proxy";
case 307: return "Temporary Redirect";
case 308: return "Permanent Redirect";
case 400: return "Bad Request";
case 401: return "Unauthorized";
case 402: return "Payment Required";
case 403: return "Forbidden";
case 404: return "Not Found";
case 405: return "Method Not Allowed";
case 406: return "Not Acceptable";
case 407: return "Proxy Authentication Required";
case 408: return "Request Timeout";
case 409: return "Conflict";
case 410: return "Gone";
case 411: return "Length Required";
case 412: return "Precondition Failed";
case 413: return "Payload Too Large";
case 414: return "URI Too Long";
case 415: return "Unsupported Media Type";
case 416: return "Requested Range Not Satisfiable";
case 417: return "Expectation Failed";
case 418: return "I\'m a teapot";
case 422: return "Unprocessable Entity";
case 423: return "Locked";
case 424: return "Failed Dependency";
case 426: return "Upgrade Required";
case 428: return "Precondition Required";
case 429: return "Too Many Requests";
case 431: return "Request Header Fields Too Large";
case 451: return "Unavailable For Legal Reasons";
case 500: return "Internal Server Error";
case 501: return "Not Implemented";
case 502: return "Bad Gateway";
case 503: return "Service Unavailable";
case 504: return "Gateway Timeout";
case 505: return "HTTP Version Not Supported";
case 506: return "Variant Also Negotiates";
case 507: return "Insufficient Storage";
case 508: return "Loop Detected";
case 510: return "Not Extended";
case 511: return "Network Authentication Required";
default: return "Server Error";
}
}
static int call_user(struct connection *conn, enum mg_event ev) {
return conn != NULL && conn->server != NULL &&
conn->server->event_handler != NULL ?
conn->server->event_handler(&conn->mg_conn, ev) : MG_FALSE;
}
static void send_http_error(struct connection *conn, int code,
const char *fmt, ...) {
const char *message = status_code_to_str(code);
const char *rewrites = conn->server->config_options[URL_REWRITES];
char headers[200], body[200];
struct vec a, b;
va_list ap;
int body_len, headers_len, match_code;
conn->mg_conn.status_code = code;
// Invoke error handler if it is set
if (call_user(conn, MG_HTTP_ERROR) == MG_TRUE) {
close_local_endpoint(conn);
return;
}
// Handle error code rewrites
while ((rewrites = next_option(rewrites, &a, &b)) != NULL) {
if ((match_code = atoi(a.ptr)) > 0 && match_code == code) {
struct mg_connection *c = &conn->mg_conn;
c->status_code = 302;
mg_printf(c, "HTTP/1.1 %d Moved\r\n"
"Location: %.*s?code=%d&orig_uri=%s&query_string=%s\r\n\r\n",
c->status_code, b.len, b.ptr, code, c->uri,
c->query_string == NULL ? "" : c->query_string);
close_local_endpoint(conn);
return;
}
}
body_len = mg_snprintf(body, sizeof(body), "%d %s\n", code, message);
if (fmt != NULL) {
va_start(ap, fmt);
body_len += mg_vsnprintf(body + body_len, sizeof(body) - body_len, fmt, ap);
va_end(ap);
}
if ((code >= 300 && code <= 399) || code == 204) {
// 3xx errors do not have body
body_len = 0;
}
headers_len = mg_snprintf(headers, sizeof(headers),
"HTTP/1.1 %d %s\r\nContent-Length: %d\r\n"
"Content-Type: text/plain\r\n\r\n",
code, message, body_len);
ns_send(conn->ns_conn, headers, headers_len);
ns_send(conn->ns_conn, body, body_len);
close_local_endpoint(conn); // This will write to the log file
}
static void write_chunk(struct connection *conn, const char *buf, int len) {
char chunk_size[50];
int n = mg_snprintf(chunk_size, sizeof(chunk_size), "%X\r\n", len);
ns_send(conn->ns_conn, chunk_size, n);
ns_send(conn->ns_conn, buf, len);
ns_send(conn->ns_conn, "\r\n", 2);
}
size_t mg_printf(struct mg_connection *conn, const char *fmt, ...) {
struct connection *c = MG_CONN_2_CONN(conn);
va_list ap;
va_start(ap, fmt);
ns_vprintf(c->ns_conn, fmt, ap);
va_end(ap);
return c->ns_conn->send_iobuf.len;
}
static void ns_forward(struct ns_connection *from, struct ns_connection *to) {
DBG(("%p -> %p %lu bytes", from, to, (unsigned long)from->recv_iobuf.len));
ns_send(to, from->recv_iobuf.buf, from->recv_iobuf.len);
iobuf_remove(&from->recv_iobuf, from->recv_iobuf.len);
}
#ifndef MONGOOSE_NO_CGI
#ifdef _WIN32
struct threadparam {
sock_t s;
HANDLE hPipe;
};
static int wait_until_ready(sock_t sock, int for_read) {
fd_set set;
FD_ZERO(&set);
FD_SET(sock, &set);
select(sock + 1, for_read ? &set : 0, for_read ? 0 : &set, 0, 0);
return 1;
}
static void *push_to_stdin(void *arg) {
struct threadparam *tp = (struct threadparam *)arg;
int n, sent, stop = 0;
DWORD k;
char buf[IOBUF_SIZE];
while (!stop && wait_until_ready(tp->s, 1) &&
(n = recv(tp->s, buf, sizeof(buf), 0)) > 0) {
if (n == -1 && GetLastError() == WSAEWOULDBLOCK) continue;
for (sent = 0; !stop && sent < n; sent += k) {
if (!WriteFile(tp->hPipe, buf + sent, n - sent, &k, 0)) stop = 1;
}
}
DBG(("%s", "FORWARED EVERYTHING TO CGI"));
CloseHandle(tp->hPipe);
NS_FREE(tp);
_endthread();
return NULL;
}
static void *pull_from_stdout(void *arg) {
struct threadparam *tp = (struct threadparam *)arg;
int k = 0, stop = 0;
DWORD n, sent;
char buf[IOBUF_SIZE];
while (!stop && ReadFile(tp->hPipe, buf, sizeof(buf), &n, NULL)) {
for (sent = 0; !stop && sent < n; sent += k) {
if (wait_until_ready(tp->s, 0) &&
(k = send(tp->s, buf + sent, n - sent, 0)) <= 0) stop = 1;
}
}
DBG(("%s", "EOF FROM CGI"));
CloseHandle(tp->hPipe);
shutdown(tp->s, 2); // Without this, IO thread may get truncated data
closesocket(tp->s);
NS_FREE(tp);
_endthread();
return NULL;
}
static void spawn_stdio_thread(sock_t sock, HANDLE hPipe,
void *(*func)(void *)) {
struct threadparam *tp = (struct threadparam *)NS_MALLOC(sizeof(*tp));
if (tp != NULL) {
tp->s = sock;
tp->hPipe = hPipe;
mg_start_thread(func, tp);
}
}
static void abs_path(const char *utf8_path, char *abs_path, size_t len) {
wchar_t buf[MAX_PATH_SIZE], buf2[MAX_PATH_SIZE];
to_wchar(utf8_path, buf, ARRAY_SIZE(buf));
GetFullPathNameW(buf, ARRAY_SIZE(buf2), buf2, NULL);
WideCharToMultiByte(CP_UTF8, 0, buf2, wcslen(buf2) + 1, abs_path, len, 0, 0);
}
static process_id_t start_process(char *interp, const char *cmd,
const char *env, const char *envp[],
const char *dir, sock_t sock) {
STARTUPINFOW si;
PROCESS_INFORMATION pi;
HANDLE a[2], b[2], me = GetCurrentProcess();
wchar_t wcmd[MAX_PATH_SIZE], full_dir[MAX_PATH_SIZE];
char buf[MAX_PATH_SIZE], buf4[MAX_PATH_SIZE], buf5[MAX_PATH_SIZE],
cmdline[MAX_PATH_SIZE], *p;
DWORD flags = DUPLICATE_CLOSE_SOURCE | DUPLICATE_SAME_ACCESS;
FILE *fp;
memset(&si, 0, sizeof(si));
memset(&pi, 0, sizeof(pi));
si.cb = sizeof(si);
si.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;
si.wShowWindow = SW_HIDE;
si.hStdError = GetStdHandle(STD_ERROR_HANDLE);
CreatePipe(&a[0], &a[1], NULL, 0);
CreatePipe(&b[0], &b[1], NULL, 0);
DuplicateHandle(me, a[0], me, &si.hStdInput, 0, TRUE, flags);
DuplicateHandle(me, b[1], me, &si.hStdOutput, 0, TRUE, flags);
if (interp == NULL && (fp = fopen(cmd, "r")) != NULL) {
buf[0] = buf[1] = '\0';
fgets(buf, sizeof(buf), fp);
buf[sizeof(buf) - 1] = '\0';
if (buf[0] == '#' && buf[1] == '!') {
interp = buf + 2;
for (p = interp + strlen(interp) - 1;
isspace(* (uint8_t *) p) && p > interp; p--) *p = '\0';
}
fclose(fp);
}
if (interp != NULL) {
abs_path(interp, buf4, ARRAY_SIZE(buf4));
interp = buf4;
}
abs_path(dir, buf5, ARRAY_SIZE(buf5));
to_wchar(dir, full_dir, ARRAY_SIZE(full_dir));
mg_snprintf(cmdline, sizeof(cmdline), "%s%s\"%s\"",
interp ? interp : "", interp ? " " : "", cmd);
to_wchar(cmdline, wcmd, ARRAY_SIZE(wcmd));
if (CreateProcessW(NULL, wcmd, NULL, NULL, TRUE, CREATE_NEW_PROCESS_GROUP,
(void *) env, full_dir, &si, &pi) != 0) {
spawn_stdio_thread(sock, a[1], push_to_stdin);
spawn_stdio_thread(sock, b[0], pull_from_stdout);
} else {
CloseHandle(a[1]);
CloseHandle(b[0]);
closesocket(sock);
}
DBG(("CGI command: [%ls] -> %p", wcmd, pi.hProcess));
// Not closing a[0] and b[1] because we've used DUPLICATE_CLOSE_SOURCE
CloseHandle(si.hStdOutput);
CloseHandle(si.hStdInput);
//CloseHandle(pi.hThread);
//CloseHandle(pi.hProcess);
return pi.hProcess;
}
#else
static process_id_t start_process(const char *interp, const char *cmd,
const char *env, const char *envp[],
const char *dir, sock_t sock) {
char buf[500];
process_id_t pid = fork();
(void) env;
if (pid == 0) {
(void) chdir(dir);
(void) dup2(sock, 0);
(void) dup2(sock, 1);
closesocket(sock);
    // After exec, all signal handlers are restored to their default values,
    // with one exception: SIGCHLD. According to POSIX.1-2001 and Linux's
    // implementation, SIGCHLD's handler is left unchanged after exec
    // if it was set to be ignored. Restore it to the default action.
signal(SIGCHLD, SIG_DFL);
if (interp == NULL) {
execle(cmd, cmd, (char *) 0, envp); // Using (char *) 0 to avoid warning
} else {
execle(interp, interp, cmd, (char *) 0, envp);
}
snprintf(buf, sizeof(buf), "Status: 500\r\n\r\n"
"500 Server Error: %s%s%s: %s", interp == NULL ? "" : interp,
interp == NULL ? "" : " ", cmd, strerror(errno));
send(1, buf, strlen(buf), 0);
exit(EXIT_FAILURE); // exec call failed
}
return pid;
}
#endif // _WIN32
// This structure helps to create an environment for the spawned CGI program.
// Environment is an array of "VARIABLE=VALUE\0" ASCIIZ strings,
// last element must be NULL.
// However, on Windows there is a requirement that all these VARIABLE=VALUE\0
// strings must reside in a contiguous buffer. The end of the buffer is
// marked by two '\0' characters.
// We satisfy both worlds: we create an envp array (the 'vars' member),
// whose entries are all pointers into 'buf'.
struct cgi_env_block {
struct mg_connection *conn;
char buf[CGI_ENVIRONMENT_SIZE]; // Environment buffer
const char *vars[MAX_CGI_ENVIR_VARS]; // char *envp[]
int len; // Space taken
int nvars; // Number of variables in envp[]
};
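// Illustrative layout (an assumption for clarity, not produced verbatim by
// the code): after adding two variables, the block looks like
//   buf:  "REQUEST_METHOD=GET\0QUERY_STRING=x=1\0\0"
//   vars: { &buf[0], &buf[19], NULL }
// i.e. every vars[] entry points inside buf, and buf ends with two '\0's.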
// Append VARIABLE=VALUE\0 string to the buffer, and add a respective
// pointer into the vars array.
static char *addenv(struct cgi_env_block *block, const char *fmt, ...) {
int n, space;
char *added;
va_list ap;
// Calculate how much space is left in the buffer
space = sizeof(block->buf) - block->len - 2;
assert(space >= 0);
  // Make a pointer to the free space in the buffer
added = block->buf + block->len;
// Copy VARIABLE=VALUE\0 string into the free space
va_start(ap, fmt);
n = mg_vsnprintf(added, (size_t) space, fmt, ap);
va_end(ap);
// Make sure we do not overflow buffer and the envp array
if (n > 0 && n + 1 < space &&
block->nvars < (int) ARRAY_SIZE(block->vars) - 2) {
// Append a pointer to the added string into the envp array
block->vars[block->nvars++] = added;
// Bump up used length counter. Include \0 terminator
block->len += n + 1;
}
return added;
}
static void addenv2(struct cgi_env_block *blk, const char *name) {
const char *s;
if ((s = getenv(name)) != NULL) addenv(blk, "%s=%s", name, s);
}
static void prepare_cgi_environment(struct connection *conn,
const char *prog,
struct cgi_env_block *blk) {
struct mg_connection *ri = &conn->mg_conn;
const char *s, *slash;
char *p, **opts = conn->server->config_options;
int i;
blk->len = blk->nvars = 0;
blk->conn = ri;
if ((s = getenv("SERVER_NAME")) != NULL) {
addenv(blk, "SERVER_NAME=%s", s);
} else {
addenv(blk, "SERVER_NAME=%s", ri->local_ip);
}
addenv(blk, "SERVER_ROOT=%s", opts[DOCUMENT_ROOT]);
addenv(blk, "DOCUMENT_ROOT=%s", opts[DOCUMENT_ROOT]);
addenv(blk, "SERVER_SOFTWARE=%s/%s", "Mongoose", MONGOOSE_VERSION);
// Prepare the environment block
addenv(blk, "%s", "GATEWAY_INTERFACE=CGI/1.1");
addenv(blk, "%s", "SERVER_PROTOCOL=HTTP/1.1");
addenv(blk, "%s", "REDIRECT_STATUS=200"); // For PHP
// TODO(lsm): fix this for IPv6 case
//addenv(blk, "SERVER_PORT=%d", ri->remote_port);
addenv(blk, "REQUEST_METHOD=%s", ri->request_method);
addenv(blk, "REMOTE_ADDR=%s", ri->remote_ip);
addenv(blk, "REMOTE_PORT=%d", ri->remote_port);
addenv(blk, "REQUEST_URI=%s%s%s", ri->uri,
ri->query_string == NULL ? "" : "?",
ri->query_string == NULL ? "" : ri->query_string);
// SCRIPT_NAME
if (conn->path_info != NULL) {
addenv(blk, "SCRIPT_NAME=%.*s",
(int) (strlen(ri->uri) - strlen(conn->path_info)), ri->uri);
addenv(blk, "PATH_INFO=%s", conn->path_info);
} else {
s = strrchr(prog, '/');
slash = strrchr(ri->uri, '/');
addenv(blk, "SCRIPT_NAME=%.*s%s",
slash == NULL ? 0 : (int) (slash - ri->uri), ri->uri,
s == NULL ? prog : s);
}
addenv(blk, "SCRIPT_FILENAME=%s", prog);
addenv(blk, "PATH_TRANSLATED=%s", prog);
addenv(blk, "HTTPS=%s", conn->ns_conn->ssl != NULL ? "on" : "off");
if ((s = mg_get_header(ri, "Content-Type")) != NULL)
addenv(blk, "CONTENT_TYPE=%s", s);
if (ri->query_string != NULL)
addenv(blk, "QUERY_STRING=%s", ri->query_string);
if ((s = mg_get_header(ri, "Content-Length")) != NULL)
addenv(blk, "CONTENT_LENGTH=%s", s);
addenv2(blk, "PATH");
addenv2(blk, "TMP");
addenv2(blk, "TEMP");
addenv2(blk, "TMPDIR");
addenv2(blk, "PERLLIB");
addenv2(blk, ENV_EXPORT_TO_CGI);
#if defined(_WIN32)
addenv2(blk, "COMSPEC");
addenv2(blk, "SYSTEMROOT");
addenv2(blk, "SystemDrive");
addenv2(blk, "ProgramFiles");
addenv2(blk, "ProgramFiles(x86)");
addenv2(blk, "CommonProgramFiles(x86)");
#else
addenv2(blk, "LD_LIBRARY_PATH");
#endif // _WIN32
// Add all headers as HTTP_* variables
for (i = 0; i < ri->num_headers; i++) {
p = addenv(blk, "HTTP_%s=%s",
ri->http_headers[i].name, ri->http_headers[i].value);
// Convert variable name into uppercase, and change - to _
for (; *p != '=' && *p != '\0'; p++) {
if (*p == '-')
*p = '_';
*p = (char) toupper(* (unsigned char *) p);
}
}
blk->vars[blk->nvars++] = NULL;
blk->buf[blk->len++] = '\0';
assert(blk->nvars < (int) ARRAY_SIZE(blk->vars));
assert(blk->len > 0);
assert(blk->len < (int) sizeof(blk->buf));
}
static const char cgi_status[] = "HTTP/1.1 200 OK\r\n";
static void open_cgi_endpoint(struct connection *conn, const char *prog) {
struct cgi_env_block blk;
char dir[MAX_PATH_SIZE];
const char *p;
sock_t fds[2];
prepare_cgi_environment(conn, prog, &blk);
  // CGI must be executed in its own directory. 'dir' must point to the
  // directory containing the executable program, and 'p' must point to the
  // executable program name relative to 'dir'.
if ((p = strrchr(prog, '/')) == NULL) {
mg_snprintf(dir, sizeof(dir), "%s", ".");
} else {
mg_snprintf(dir, sizeof(dir), "%.*s", (int) (p - prog), prog);
}
// Try to create socketpair in a loop until success. ns_socketpair()
// can be interrupted by a signal and fail.
// TODO(lsm): use sigaction to restart interrupted syscall
do {
ns_socketpair(fds);
} while (fds[0] == INVALID_SOCKET);
if (start_process(conn->server->config_options[CGI_INTERPRETER],
prog, blk.buf, blk.vars, dir, fds[1]) != 0) {
conn->endpoint_type = EP_CGI;
conn->endpoint.nc = ns_add_sock(&conn->server->ns_mgr, fds[0],
mg_ev_handler, conn);
conn->endpoint.nc->flags |= MG_CGI_CONN;
ns_send(conn->ns_conn, cgi_status, sizeof(cgi_status) - 1);
conn->mg_conn.status_code = 200;
conn->ns_conn->flags |= NSF_BUFFER_BUT_DONT_SEND;
// Pass POST data to the CGI process
conn->endpoint.nc->send_iobuf = conn->ns_conn->recv_iobuf;
iobuf_init(&conn->ns_conn->recv_iobuf, 0);
} else {
closesocket(fds[0]);
send_http_error(conn, 500, "start_process(%s) failed", prog);
}
#ifndef _WIN32
closesocket(fds[1]); // On Windows, CGI stdio thread closes that socket
#endif
}
static void on_cgi_data(struct ns_connection *nc) {
struct connection *conn = (struct connection *) nc->user_data;
const char *status = "500";
struct mg_connection c;
if (!conn) return;
// Copy CGI data from CGI socket to the client send buffer
ns_forward(nc, conn->ns_conn);
// If reply has not been parsed yet, parse it
if (conn->ns_conn->flags & NSF_BUFFER_BUT_DONT_SEND) {
struct iobuf *io = &conn->ns_conn->send_iobuf;
int s_len = sizeof(cgi_status) - 1;
int len = get_request_len(io->buf + s_len, io->len - s_len);
char buf[MAX_REQUEST_SIZE], *s = buf;
if (len == 0) return;
if (len < 0 || len > (int) sizeof(buf)) {
len = io->len;
iobuf_remove(io, io->len);
send_http_error(conn, 500, "CGI program sent malformed headers: [%.*s]",
len, io->buf);
} else {
memset(&c, 0, sizeof(c));
memcpy(buf, io->buf + s_len, len);
buf[len - 1] = '\0';
parse_http_headers(&s, &c);
if (mg_get_header(&c, "Location") != NULL) {
status = "302";
} else if ((status = (char *) mg_get_header(&c, "Status")) == NULL) {
status = "200";
}
      // Patch the status code in the prepended "HTTP/1.1 200 OK" status line
      memcpy(io->buf + 9, status, 3);
conn->mg_conn.status_code = atoi(status);
}
conn->ns_conn->flags &= ~NSF_BUFFER_BUT_DONT_SEND;
}
}
#endif // !MONGOOSE_NO_CGI
static char *mg_strdup(const char *str) {
char *copy = (char *) NS_MALLOC(strlen(str) + 1);
if (copy != NULL) {
strcpy(copy, str);
}
return copy;
}
static int isbyte(int n) {
return n >= 0 && n <= 255;
}
static int parse_net(const char *spec, uint32_t *net, uint32_t *mask) {
int n, a, b, c, d, slash = 32, len = 0;
if ((sscanf(spec, "%d.%d.%d.%d/%d%n", &a, &b, &c, &d, &slash, &n) == 5 ||
sscanf(spec, "%d.%d.%d.%d%n", &a, &b, &c, &d, &n) == 4) &&
isbyte(a) && isbyte(b) && isbyte(c) && isbyte(d) &&
slash >= 0 && slash < 33) {
len = n;
*net = ((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | d;
*mask = slash ? 0xffffffffU << (32 - slash) : 0;
}
return len;
}
// Verify given socket address against the ACL.
// Return -1 if ACL is malformed, 0 if address is disallowed, 1 if allowed.
static int check_acl(const char *acl, uint32_t remote_ip) {
int allowed, flag;
uint32_t net, mask;
struct vec vec;
// If any ACL is set, deny by default
allowed = acl == NULL ? '+' : '-';
while ((acl = next_option(acl, &vec, NULL)) != NULL) {
flag = vec.ptr[0];
if ((flag != '+' && flag != '-') ||
parse_net(&vec.ptr[1], &net, &mask) == 0) {
return -1;
}
if (net == (remote_ip & mask)) {
allowed = flag;
}
}
return allowed == '+';
}
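// A minimal usage sketch (illustrative only): deny everything except the
// 192.168.0.0/16 network.
#if 0
static int allow_lan_only(uint32_t remote_ip) {
  // remote_ip is in host byte order, e.g. 192.168.1.2 == 0xc0a80102
  return check_acl("-0.0.0.0/0,+192.168.0.0/16", remote_ip) == 1;
}
#endif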
// Protect against directory disclosure attack by removing '..',
// excessive '/' and '\' characters
static void remove_double_dots_and_double_slashes(char *s) {
char *p = s;
while (*s != '\0') {
*p++ = *s++;
if (s[-1] == '/' || s[-1] == '\\') {
// Skip all following slashes, backslashes and double-dots
while (s[0] != '\0') {
if (s[0] == '/' || s[0] == '\\') { s++; }
else if (s[0] == '.' && s[1] == '.') { s += 2; }
else { break; }
}
}
}
*p = '\0';
}
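// For example (illustrative): "/a//b/../c" collapses to "/a/b/c", and
// "/../x" collapses to "/x".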
int mg_url_decode(const char *src, int src_len, char *dst,
int dst_len, int is_form_url_encoded) {
int i, j, a, b;
  // 'W' == 'a' - 10, so lowercase hex digits 'a'..'f' map to 10..15
#define HEXTOI(x) (isdigit(x) ? x - '0' : x - 'W')
for (i = j = 0; i < src_len && j < dst_len - 1; i++, j++) {
if (src[i] == '%' && i < src_len - 2 &&
isxdigit(* (const unsigned char *) (src + i + 1)) &&
isxdigit(* (const unsigned char *) (src + i + 2))) {
a = tolower(* (const unsigned char *) (src + i + 1));
b = tolower(* (const unsigned char *) (src + i + 2));
dst[j] = (char) ((HEXTOI(a) << 4) | HEXTOI(b));
i += 2;
} else if (is_form_url_encoded && src[i] == '+') {
dst[j] = ' ';
} else {
dst[j] = src[i];
}
}
dst[j] = '\0'; // Null-terminate the destination
return i >= src_len ? j : -1;
}
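// A minimal usage sketch for mg_url_decode() (illustrative only):
#if 0
static void decode_example(void) {
  char dst[32];
  int n = mg_url_decode("a%20b+c", 7, dst, (int) sizeof(dst), 1);
  // n == 5 and dst == "a b c": "%20" becomes a space, and '+' becomes a
  // space because is_form_url_encoded is non-zero.
  (void) n;
}
#endif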
static int is_valid_http_method(const char *s) {
return !strcmp(s, "GET") || !strcmp(s, "POST") || !strcmp(s, "HEAD") ||
!strcmp(s, "CONNECT") || !strcmp(s, "PUT") || !strcmp(s, "DELETE") ||
!strcmp(s, "OPTIONS") || !strcmp(s, "PROPFIND") || !strcmp(s, "MKCOL");
}
// Parse an HTTP message (request or response), and fill in the
// mg_connection structure. This function modifies the buffer by
// NUL-terminating HTTP request components, header names and header values.
// Note that len must cover the headers up to and including the final \n.
static int parse_http_message(char *buf, int len, struct mg_connection *ri) {
int is_request, n;
// Reset the connection. Make sure that we don't touch fields that are
// set elsewhere: remote_ip, remote_port, server_param
ri->request_method = ri->uri = ri->http_version = ri->query_string = NULL;
ri->num_headers = ri->status_code = ri->is_websocket = ri->content_len = 0;
buf[len - 1] = '\0';
  // The RFC says that all initial whitespace should be ignored
while (*buf != '\0' && isspace(* (unsigned char *) buf)) {
buf++;
}
ri->request_method = skip(&buf, " ");
ri->uri = skip(&buf, " ");
ri->http_version = skip(&buf, "\r\n");
// HTTP message could be either HTTP request or HTTP response, e.g.
// "GET / HTTP/1.0 ...." or "HTTP/1.0 200 OK ..."
is_request = is_valid_http_method(ri->request_method);
if ((is_request && memcmp(ri->http_version, "HTTP/", 5) != 0) ||
(!is_request && memcmp(ri->request_method, "HTTP/", 5) != 0)) {
len = -1;
} else {
if (is_request) {
ri->http_version += 5;
} else {
ri->status_code = atoi(ri->uri);
}
parse_http_headers(&buf, ri);
if ((ri->query_string = strchr(ri->uri, '?')) != NULL) {
*(char *) ri->query_string++ = '\0';
}
n = (int) strlen(ri->uri);
mg_url_decode(ri->uri, n, (char *) ri->uri, n + 1, 0);
if (*ri->uri == '/' || *ri->uri == '.') {
remove_double_dots_and_double_slashes((char *) ri->uri);
}
}
return len;
}
static int lowercase(const char *s) {
return tolower(* (const unsigned char *) s);
}
static int mg_strcasecmp(const char *s1, const char *s2) {
int diff;
do {
diff = lowercase(s1++) - lowercase(s2++);
} while (diff == 0 && s1[-1] != '\0');
return diff;
}
static int mg_strncasecmp(const char *s1, const char *s2, size_t len) {
int diff = 0;
if (len > 0)
do {
diff = lowercase(s1++) - lowercase(s2++);
} while (diff == 0 && s1[-1] != '\0' && --len > 0);
return diff;
}
// Return HTTP header value, or NULL if not found.
const char *mg_get_header(const struct mg_connection *ri, const char *s) {
int i;
for (i = 0; i < ri->num_headers; i++)
if (!mg_strcasecmp(s, ri->http_headers[i].name))
return ri->http_headers[i].value;
return NULL;
}
// Perform case-insensitive match of string against pattern
int mg_match_prefix(const char *pattern, int pattern_len, const char *str) {
const char *or_str;
int len, res, i = 0, j = 0;
if ((or_str = (const char *) memchr(pattern, '|', pattern_len)) != NULL) {
res = mg_match_prefix(pattern, or_str - pattern, str);
return res > 0 ? res : mg_match_prefix(or_str + 1,
(pattern + pattern_len) - (or_str + 1), str);
}
for (; i < pattern_len; i++, j++) {
if (pattern[i] == '?' && str[j] != '\0') {
continue;
} else if (pattern[i] == '$') {
return str[j] == '\0' ? j : -1;
} else if (pattern[i] == '*') {
i++;
if (pattern[i] == '*') {
i++;
len = (int) strlen(str + j);
} else {
len = (int) strcspn(str + j, "/");
}
if (i == pattern_len) {
return j + len;
}
do {
res = mg_match_prefix(pattern + i, pattern_len - i, str + j + len);
} while (res == -1 && len-- > 0);
return res == -1 ? -1 : j + res + len;
} else if (lowercase(&pattern[i]) != lowercase(&str[j])) {
return -1;
}
}
return j;
}
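// For example (illustrative): mg_match_prefix("**.cgi$", 7, "/foo/bar.cgi")
// returns 12 (the full match length), while the same pattern against
// "/x.html" returns -1. A '|' separates alternative patterns.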
// This function prints HTML pages, and expands "{{something}}" blocks
// inside HTML by calling appropriate callback functions.
// Note that {{@path/to/file}} construct outputs embedded file's contents,
// which provides SSI-like functionality.
void mg_template(struct mg_connection *conn, const char *s,
struct mg_expansion *expansions) {
int i, j, pos = 0, inside_marker = 0;
for (i = 0; s[i] != '\0'; i++) {
if (inside_marker == 0 && !memcmp(&s[i], "{{", 2)) {
if (i > pos) {
mg_send_data(conn, &s[pos], i - pos);
}
pos = i;
inside_marker = 1;
}
if (inside_marker == 1 && !memcmp(&s[i], "}}", 2)) {
for (j = 0; expansions[j].keyword != NULL; j++) {
const char *kw = expansions[j].keyword;
if ((int) strlen(kw) == i - (pos + 2) &&
memcmp(kw, &s[pos + 2], i - (pos + 2)) == 0) {
expansions[j].handler(conn);
pos = i + 2;
break;
}
}
inside_marker = 0;
}
}
if (i > pos) {
mg_send_data(conn, &s[pos], i - pos);
}
}
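// A minimal usage sketch for mg_template() (illustrative only; the handler
// and keyword names are assumptions, and the handler signature mirrors the
// expansions[j].handler(conn) call above):
#if 0
static void print_name(struct mg_connection *conn) {
  mg_printf_data(conn, "%s", "world");
}
static void render_greeting(struct mg_connection *conn) {
  static struct mg_expansion expansions[] = {
    {"name", print_name},
    {NULL, NULL}
  };
  mg_template(conn, "Hello, {{name}}!", expansions);  // sends "Hello, world!"
}
#endif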
#ifndef MONGOOSE_NO_FILESYSTEM
static int must_hide_file(struct connection *conn, const char *path) {
const char *pw_pattern = "**" PASSWORDS_FILE_NAME "$";
const char *pattern = conn->server->config_options[HIDE_FILES_PATTERN];
return mg_match_prefix(pw_pattern, strlen(pw_pattern), path) > 0 ||
(pattern != NULL && mg_match_prefix(pattern, strlen(pattern), path) > 0);
}
// Return 1 if a real file has been found, 0 otherwise
static int convert_uri_to_file_name(struct connection *conn, char *buf,
size_t buf_len, file_stat_t *st) {
struct vec a, b;
const char *rewrites = conn->server->config_options[URL_REWRITES];
const char *root = conn->server->config_options[DOCUMENT_ROOT];
#ifndef MONGOOSE_NO_CGI
const char *cgi_pat = conn->server->config_options[CGI_PATTERN];
const size_t cgi_pat_len = strlen(cgi_pat);
char *p;
#endif
const char *uri = conn->mg_conn.uri;
const char *domain = mg_get_header(&conn->mg_conn, "Host");
int match_len, root_len = root == NULL ? 0 : strlen(root);
// Perform virtual hosting rewrites
if (rewrites != NULL && domain != NULL) {
const char *colon = strchr(domain, ':');
int domain_len = colon == NULL ? (int) strlen(domain) : colon - domain;
while ((rewrites = next_option(rewrites, &a, &b)) != NULL) {
if (a.len > 1 && a.ptr[0] == '@' && a.len == domain_len + 1 &&
mg_strncasecmp(a.ptr + 1, domain, domain_len) == 0) {
root = b.ptr;
root_len = b.len;
break;
}
}
}
// No filesystem access
if (root == NULL || root_len == 0) return 0;
// Handle URL rewrites
mg_snprintf(buf, buf_len, "%.*s%s", root_len, root, uri);
rewrites = conn->server->config_options[URL_REWRITES]; // Re-initialize!
while ((rewrites = next_option(rewrites, &a, &b)) != NULL) {
if ((match_len = mg_match_prefix(a.ptr, a.len, uri)) > 0) {
mg_snprintf(buf, buf_len, "%.*s%s", (int) b.len, b.ptr, uri + match_len);
break;
}
}
if (stat(buf, st) == 0) return 1;
#ifndef MONGOOSE_NO_CGI
// Support PATH_INFO for CGI scripts.
for (p = buf + strlen(root) + 2; *p != '\0'; p++) {
if (*p == '/') {
*p = '\0';
if (mg_match_prefix(cgi_pat, cgi_pat_len, buf) > 0 &&
!stat(buf, st)) {
DBG(("!!!! [%s]", buf));
*p = '/';
conn->path_info = mg_strdup(p);
*p = '\0';
return 1;
}
*p = '/';
}
}
#endif
return 0;
}
#endif // MONGOOSE_NO_FILESYSTEM
static int should_keep_alive(const struct mg_connection *conn) {
struct connection *c = MG_CONN_2_CONN(conn);
const char *method = conn->request_method;
const char *http_version = conn->http_version;
const char *header = mg_get_header(conn, "Connection");
return method != NULL &&
(!strcmp(method, "GET") || c->endpoint_type == EP_USER) &&
((header != NULL && !mg_strcasecmp(header, "keep-alive")) ||
(header == NULL && http_version && !strcmp(http_version, "1.1")));
}
size_t mg_write(struct mg_connection *c, const void *buf, int len) {
struct connection *conn = MG_CONN_2_CONN(c);
ns_send(conn->ns_conn, buf, len);
return conn->ns_conn->send_iobuf.len;
}
void mg_send_status(struct mg_connection *c, int status) {
if (c->status_code == 0) {
c->status_code = status;
mg_printf(c, "HTTP/1.1 %d %s\r\n", status, status_code_to_str(status));
}
}
void mg_send_header(struct mg_connection *c, const char *name, const char *v) {
if (c->status_code == 0) {
c->status_code = 200;
mg_printf(c, "HTTP/1.1 %d %s\r\n", 200, status_code_to_str(200));
}
mg_printf(c, "%s: %s\r\n", name, v);
}
static void terminate_headers(struct mg_connection *c) {
struct connection *conn = MG_CONN_2_CONN(c);
if (!(conn->ns_conn->flags & MG_HEADERS_SENT)) {
mg_send_header(c, "Transfer-Encoding", "chunked");
mg_write(c, "\r\n", 2);
conn->ns_conn->flags |= MG_HEADERS_SENT;
}
}
size_t mg_send_data(struct mg_connection *c, const void *data, int data_len) {
struct connection *conn = MG_CONN_2_CONN(c);
terminate_headers(c);
write_chunk(MG_CONN_2_CONN(c), (const char *) data, data_len);
return conn->ns_conn->send_iobuf.len;
}
size_t mg_printf_data(struct mg_connection *c, const char *fmt, ...) {
struct connection *conn = MG_CONN_2_CONN(c);
va_list ap;
int len;
char mem[IOBUF_SIZE], *buf = mem;
terminate_headers(c);
va_start(ap, fmt);
len = ns_avprintf(&buf, sizeof(mem), fmt, ap);
va_end(ap);
if (len >= 0) {
write_chunk((struct connection *) conn, buf, len);
}
if (buf != mem && buf != NULL) {
NS_FREE(buf);
}
return conn->ns_conn->send_iobuf.len;
}
#if !defined(MONGOOSE_NO_WEBSOCKET) || !defined(MONGOOSE_NO_AUTH)
static int is_big_endian(void) {
static const int n = 1;
return ((char *) &n)[0] == 0;
}
#endif
#ifndef MONGOOSE_NO_WEBSOCKET
// START OF SHA-1 code
// Copyright(c) By Steve Reid <[email protected]>
#define SHA1HANDSOFF
#if defined(__sun)
#include "solarisfixes.h"
#endif
union char64long16 { unsigned char c[64]; uint32_t l[16]; };
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
static uint32_t blk0(union char64long16 *block, int i) {
  // Forrest: SHA-1 expects big-endian input, swap if little-endian
if (!is_big_endian()) {
block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00) |
(rol(block->l[i], 8) & 0x00FF00FF);
}
return block->l[i];
}
/* Avoid redefine warning (ARM /usr/include/sys/ucontext.h define R0~R4) */
#undef blk
#undef R0
#undef R1
#undef R2
#undef R3
#undef R4
#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
^block->l[(i+2)&15]^block->l[i&15],1))
#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(block, i)+0x5A827999+rol(v,5);w=rol(w,30);
#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
typedef struct {
uint32_t state[5];
uint32_t count[2];
unsigned char buffer[64];
} SHA1_CTX;
static void SHA1Transform(uint32_t state[5], const unsigned char buffer[64]) {
uint32_t a, b, c, d, e;
union char64long16 block[1];
memcpy(block, buffer, 64);
a = state[0];
b = state[1];
c = state[2];
d = state[3];
e = state[4];
R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
state[4] += e;
  // Erase working structures. The order of operations is important
  // to ensure that the compiler doesn't optimize them out.
memset(block, 0, sizeof(block));
a = b = c = d = e = 0;
(void) a; (void) b; (void) c; (void) d; (void) e;
}
static void SHA1Init(SHA1_CTX *context) {
context->state[0] = 0x67452301;
context->state[1] = 0xEFCDAB89;
context->state[2] = 0x98BADCFE;
context->state[3] = 0x10325476;
context->state[4] = 0xC3D2E1F0;
context->count[0] = context->count[1] = 0;
}
static void SHA1Update(SHA1_CTX *context, const unsigned char *data,
uint32_t len) {
uint32_t i, j;
j = context->count[0];
if ((context->count[0] += len << 3) < j)
context->count[1]++;
context->count[1] += (len>>29);
j = (j >> 3) & 63;
if ((j + len) > 63) {
memcpy(&context->buffer[j], data, (i = 64-j));
SHA1Transform(context->state, context->buffer);
for ( ; i + 63 < len; i += 64) {
SHA1Transform(context->state, &data[i]);
}
j = 0;
}
else i = 0;
memcpy(&context->buffer[j], &data[i], len - i);
}
static void SHA1Final(unsigned char digest[20], SHA1_CTX *context) {
unsigned i;
unsigned char finalcount[8], c;
for (i = 0; i < 8; i++) {
finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)]
>> ((3-(i & 3)) * 8) ) & 255);
}
  c = 0200;  // 0x80: the SHA-1 padding marker byte
SHA1Update(context, &c, 1);
while ((context->count[0] & 504) != 448) {
c = 0000;
SHA1Update(context, &c, 1);
}
SHA1Update(context, finalcount, 8);
for (i = 0; i < 20; i++) {
digest[i] = (unsigned char)
((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
}
memset(context, '\0', sizeof(*context));
memset(&finalcount, '\0', sizeof(finalcount));
}
// END OF SHA1 CODE
static void base64_encode(const unsigned char *src, int src_len, char *dst) {
static const char *b64 =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
int i, j, a, b, c;
for (i = j = 0; i < src_len; i += 3) {
a = src[i];
b = i + 1 >= src_len ? 0 : src[i + 1];
c = i + 2 >= src_len ? 0 : src[i + 2];
dst[j++] = b64[a >> 2];
dst[j++] = b64[((a & 3) << 4) | (b >> 4)];
if (i + 1 < src_len) {
dst[j++] = b64[(b & 15) << 2 | (c >> 6)];
}
if (i + 2 < src_len) {
dst[j++] = b64[c & 63];
}
}
while (j % 4 != 0) {
dst[j++] = '=';
}
dst[j++] = '\0';
}
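// For example (illustrative): base64_encode("Ma", 2, dst) yields "TWE=",
// and base64_encode("M", 1, dst) yields "TQ==": the output is always padded
// with '=' to a multiple of four characters.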
static void send_websocket_handshake(struct mg_connection *conn,
const char *key) {
static const char *magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
char buf[500], sha[20], b64_sha[sizeof(sha) * 2];
SHA1_CTX sha_ctx;
mg_snprintf(buf, sizeof(buf), "%s%s", key, magic);
SHA1Init(&sha_ctx);
SHA1Update(&sha_ctx, (unsigned char *) buf, strlen(buf));
SHA1Final((unsigned char *) sha, &sha_ctx);
base64_encode((unsigned char *) sha, sizeof(sha), b64_sha);
mg_snprintf(buf, sizeof(buf), "%s%s%s",
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: ", b64_sha, "\r\n\r\n");
mg_write(conn, buf, strlen(buf));
}
static int deliver_websocket_frame(struct connection *conn) {
  // Declaring buf as unsigned char * is important, as it is used in byte
  // arithmetic below
unsigned char *buf = (unsigned char *) conn->ns_conn->recv_iobuf.buf;
int i, len, buf_len = conn->ns_conn->recv_iobuf.len, frame_len = 0,
mask_len = 0, header_len = 0, data_len = 0, buffered = 0;
if (buf_len >= 2) {
len = buf[1] & 127;
mask_len = buf[1] & 128 ? 4 : 0;
if (len < 126 && buf_len >= mask_len) {
data_len = len;
header_len = 2 + mask_len;
} else if (len == 126 && buf_len >= 4 + mask_len) {
header_len = 4 + mask_len;
data_len = ((((int) buf[2]) << 8) + buf[3]);
} else if (buf_len >= 10 + mask_len) {
header_len = 10 + mask_len;
data_len = (int) (((uint64_t) htonl(* (uint32_t *) &buf[2])) << 32) +
htonl(* (uint32_t *) &buf[6]);
}
}
frame_len = header_len + data_len;
buffered = frame_len > 0 && frame_len <= buf_len;
if (buffered) {
conn->mg_conn.content_len = data_len;
conn->mg_conn.content = (char *) buf + header_len;
conn->mg_conn.wsbits = buf[0];
// Apply mask if necessary
if (mask_len > 0) {
for (i = 0; i < data_len; i++) {
buf[i + header_len] ^= (buf + header_len - mask_len)[i % 4];
}
}
// Call the handler and remove frame from the iobuf
if (call_user(conn, MG_REQUEST) == MG_FALSE) {
conn->ns_conn->flags |= NSF_FINISHED_SENDING_DATA;
}
iobuf_remove(&conn->ns_conn->recv_iobuf, frame_len);
}
return buffered;
}
size_t mg_websocket_write(struct mg_connection *conn, int opcode,
const char *data, size_t data_len) {
unsigned char mem[4192], *copy = mem;
size_t copy_len = 0;
if (data_len + 10 > sizeof(mem) &&
(copy = (unsigned char *) NS_MALLOC(data_len + 10)) == NULL) {
return 0;
}
copy[0] = 0x80 + (opcode & 0x0f);
// Frame format: http://tools.ietf.org/html/rfc6455#section-5.2
if (data_len < 126) {
// Inline 7-bit length field
copy[1] = data_len;
memcpy(copy + 2, data, data_len);
copy_len = 2 + data_len;
} else if (data_len <= 0xFFFF) {
// 16-bit length field
copy[1] = 126;
* (uint16_t *) (copy + 2) = (uint16_t) htons((uint16_t) data_len);
memcpy(copy + 4, data, data_len);
copy_len = 4 + data_len;
  } else {
    // 64-bit length field
    uint32_t hi, lo;
    copy[1] = 127;
    hi = htonl((uint32_t) ((uint64_t) data_len >> 32));
    lo = htonl((uint32_t) (data_len & 0xffffffff));
    memcpy(copy + 2, &hi, sizeof(hi));
    memcpy(copy + 6, &lo, sizeof(lo));
    memcpy(copy + 10, data, data_len);
    copy_len = 10 + data_len;
}
if (copy_len > 0) {
mg_write(conn, copy, copy_len);
}
if (copy != mem) {
NS_FREE(copy);
}
  // If we sent a closing frame, schedule the connection to be closed after
  // the data is drained to the client.
if (opcode == WEBSOCKET_OPCODE_CONNECTION_CLOSE) {
MG_CONN_2_CONN(conn)->ns_conn->flags |= NSF_FINISHED_SENDING_DATA;
}
return MG_CONN_2_CONN(conn)->ns_conn->send_iobuf.len;
}
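// A minimal sketch of sending a websocket text frame (illustrative only; it
// assumes WEBSOCKET_OPCODE_TEXT is defined as 1, per RFC 6455):
#if 0
static void send_greeting(struct mg_connection *conn) {
  mg_websocket_write(conn, WEBSOCKET_OPCODE_TEXT, "hello", 5);
}
#endif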
size_t mg_websocket_printf(struct mg_connection *conn, int opcode,
const char *fmt, ...) {
char mem[4192], *buf = mem;
va_list ap;
int len;
va_start(ap, fmt);
if ((len = ns_avprintf(&buf, sizeof(mem), fmt, ap)) > 0) {
mg_websocket_write(conn, opcode, buf, len);
}
va_end(ap);
if (buf != mem && buf != NULL) {
NS_FREE(buf);
}
return MG_CONN_2_CONN(conn)->ns_conn->send_iobuf.len;
}
static void send_websocket_handshake_if_requested(struct mg_connection *conn) {
const char *ver = mg_get_header(conn, "Sec-WebSocket-Version"),
*key = mg_get_header(conn, "Sec-WebSocket-Key");
if (ver != NULL && key != NULL) {
conn->is_websocket = 1;
if (call_user(MG_CONN_2_CONN(conn), MG_WS_HANDSHAKE) == MG_FALSE) {
send_websocket_handshake(conn, key);
}
call_user(MG_CONN_2_CONN(conn), MG_WS_CONNECT);
}
}
static void ping_idle_websocket_connection(struct connection *conn, time_t t) {
if (t - conn->ns_conn->last_io_time > MONGOOSE_USE_WEBSOCKET_PING_INTERVAL) {
mg_websocket_write(&conn->mg_conn, WEBSOCKET_OPCODE_PING, "", 0);
}
}
#else
#define ping_idle_websocket_connection(conn, t)
#endif // !MONGOOSE_NO_WEBSOCKET
static void write_terminating_chunk(struct connection *conn) {
mg_write(&conn->mg_conn, "0\r\n\r\n", 5);
}
static int call_request_handler(struct connection *conn) {
int result;
conn->mg_conn.content = conn->ns_conn->recv_iobuf.buf;
if ((result = call_user(conn, MG_REQUEST)) == MG_TRUE) {
if (conn->ns_conn->flags & MG_HEADERS_SENT) {
write_terminating_chunk(conn);
}
close_local_endpoint(conn);
}
return result;
}
const char *mg_get_mime_type(const char *path, const char *default_mime_type) {
const char *ext;
size_t i, path_len;
path_len = strlen(path);
for (i = 0; static_builtin_mime_types[i].extension != NULL; i++) {
ext = path + (path_len - static_builtin_mime_types[i].ext_len);
if (path_len > static_builtin_mime_types[i].ext_len &&
mg_strcasecmp(ext, static_builtin_mime_types[i].extension) == 0) {
return static_builtin_mime_types[i].mime_type;
}
}
return default_mime_type;
}
#ifndef MONGOOSE_NO_FILESYSTEM
// Convert a month name to a month number in 0..11. Return -1 on error
static int get_month_index(const char *s) {
static const char *month_names[] = {
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
};
int i;
for (i = 0; i < (int) ARRAY_SIZE(month_names); i++)
if (!strcmp(s, month_names[i]))
return i;
return -1;
}
static int num_leap_years(int year) {
return year / 4 - year / 100 + year / 400;
}
// Parse UTC date-time string, and return the corresponding time_t value.
static time_t parse_date_string(const char *datetime) {
static const unsigned short days_before_month[] = {
0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
};
char month_str[32];
int second, minute, hour, day, month, year, leap_days, days;
time_t result = (time_t) 0;
if (((sscanf(datetime, "%d/%3s/%d %d:%d:%d",
&day, month_str, &year, &hour, &minute, &second) == 6) ||
(sscanf(datetime, "%d %3s %d %d:%d:%d",
&day, month_str, &year, &hour, &minute, &second) == 6) ||
(sscanf(datetime, "%*3s, %d %3s %d %d:%d:%d",
&day, month_str, &year, &hour, &minute, &second) == 6) ||
(sscanf(datetime, "%d-%3s-%d %d:%d:%d",
&day, month_str, &year, &hour, &minute, &second) == 6)) &&
year > 1970 &&
(month = get_month_index(month_str)) != -1) {
leap_days = num_leap_years(year) - num_leap_years(1970);
year -= 1970;
days = year * 365 + days_before_month[month] + (day - 1) + leap_days;
result = days * 24 * 3600 + hour * 3600 + minute * 60 + second;
}
return result;
}
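// For example (illustrative): parse_date_string("Thu, 01 Jan 2015 00:00:00")
// returns 1420070400, the UTC epoch value for that instant; a trailing
// " GMT" suffix is simply ignored by the sscanf() formats above.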
// Look at the "path" extension and figure what mime type it has.
// Store mime type in the vector.
static void get_mime_type(const struct mg_server *server, const char *path,
struct vec *vec) {
struct vec ext_vec, mime_vec;
const char *list, *ext;
size_t path_len;
path_len = strlen(path);
// Scan user-defined mime types first, in case user wants to
// override default mime types.
list = server->config_options[EXTRA_MIME_TYPES];
while ((list = next_option(list, &ext_vec, &mime_vec)) != NULL) {
// ext now points to the path suffix
ext = path + path_len - ext_vec.len;
if (mg_strncasecmp(ext, ext_vec.ptr, ext_vec.len) == 0) {
*vec = mime_vec;
return;
}
}
vec->ptr = mg_get_mime_type(path, "text/plain");
vec->len = strlen(vec->ptr);
}
static const char *suggest_connection_header(const struct mg_connection *conn) {
return should_keep_alive(conn) ? "keep-alive" : "close";
}
static void construct_etag(char *buf, size_t buf_len, const file_stat_t *st) {
mg_snprintf(buf, buf_len, "\"%lx.%" INT64_FMT "\"",
(unsigned long) st->st_mtime, (int64_t) st->st_size);
}
// Return True if we should reply 304 Not Modified.
static int is_not_modified(const struct connection *conn,
const file_stat_t *stp) {
char etag[64];
const char *ims = mg_get_header(&conn->mg_conn, "If-Modified-Since");
const char *inm = mg_get_header(&conn->mg_conn, "If-None-Match");
construct_etag(etag, sizeof(etag), stp);
return (inm != NULL && !mg_strcasecmp(etag, inm)) ||
(ims != NULL && stp->st_mtime <= parse_date_string(ims));
}
// For a given directory path, substitute it with a valid index file.
// Return 1 if an index file has been found, 0 if not found.
// If the file is found, its stats are returned in stp.
static int find_index_file(struct connection *conn, char *path,
size_t path_len, file_stat_t *stp) {
const char *list = conn->server->config_options[INDEX_FILES];
file_stat_t st;
struct vec filename_vec;
size_t n = strlen(path), found = 0;
  // The 'path' given to us points to the directory. Remove all trailing
  // directory separator characters from the end of the path, and
  // then append a single directory separator character.
while (n > 0 && path[n - 1] == '/') {
n--;
}
path[n] = '/';
  // Traverse the index files list. For each entry, append it to the given
  // path and see if the file exists. If it exists, break the loop.
while ((list = next_option(list, &filename_vec, NULL)) != NULL) {
// Ignore too long entries that may overflow path buffer
if (filename_vec.len > (int) (path_len - (n + 2)))
continue;
// Prepare full path to the index file
strncpy(path + n + 1, filename_vec.ptr, filename_vec.len);
path[n + 1 + filename_vec.len] = '\0';
//DBG(("[%s]", path));
// Does it exist?
if (!stat(path, &st)) {
// Yes it does, break the loop
*stp = st;
found = 1;
break;
}
}
// If no index file exists, restore directory path
if (!found) {
path[n] = '\0';
}
return found;
}
static int parse_range_header(const char *header, int64_t *a, int64_t *b) {
return sscanf(header, "bytes=%" INT64_FMT "-%" INT64_FMT, a, b);
}
static void gmt_time_string(char *buf, size_t buf_len, time_t *t) {
strftime(buf, buf_len, "%a, %d %b %Y %H:%M:%S GMT", gmtime(t));
}
static void open_file_endpoint(struct connection *conn, const char *path,
file_stat_t *st, const char *extra_headers) {
char date[64], lm[64], etag[64], range[64], headers[1000];
const char *msg = "OK", *hdr;
time_t curtime = time(NULL);
int64_t r1, r2;
struct vec mime_vec;
int n;
conn->endpoint_type = EP_FILE;
ns_set_close_on_exec(conn->endpoint.fd);
conn->mg_conn.status_code = 200;
get_mime_type(conn->server, path, &mime_vec);
conn->cl = st->st_size;
range[0] = '\0';
// If Range: header specified, act accordingly
r1 = r2 = 0;
hdr = mg_get_header(&conn->mg_conn, "Range");
if (hdr != NULL && (n = parse_range_header(hdr, &r1, &r2)) > 0 &&
r1 >= 0 && r2 >= 0) {
conn->mg_conn.status_code = 206;
conn->cl = n == 2 ? (r2 > conn->cl ? conn->cl : r2) - r1 + 1: conn->cl - r1;
mg_snprintf(range, sizeof(range), "Content-Range: bytes "
"%" INT64_FMT "-%" INT64_FMT "/%" INT64_FMT "\r\n",
r1, r1 + conn->cl - 1, (int64_t) st->st_size);
msg = "Partial Content";
lseek(conn->endpoint.fd, r1, SEEK_SET);
}
// Prepare Etag, Date, Last-Modified headers. Must be in UTC, according to
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3
gmt_time_string(date, sizeof(date), &curtime);
gmt_time_string(lm, sizeof(lm), &st->st_mtime);
construct_etag(etag, sizeof(etag), st);
n = mg_snprintf(headers, sizeof(headers),
"HTTP/1.1 %d %s\r\n"
"Date: %s\r\n"
"Last-Modified: %s\r\n"
"Etag: %s\r\n"
"Content-Type: %.*s\r\n"
"Content-Length: %" INT64_FMT "\r\n"
"Connection: %s\r\n"
"Accept-Ranges: bytes\r\n"
"%s%s%s\r\n",
conn->mg_conn.status_code, msg, date, lm, etag,
(int) mime_vec.len, mime_vec.ptr, conn->cl,
suggest_connection_header(&conn->mg_conn),
range, extra_headers == NULL ? "" : extra_headers,
MONGOOSE_USE_EXTRA_HTTP_HEADERS);
ns_send(conn->ns_conn, headers, n);
if (!strcmp(conn->mg_conn.request_method, "HEAD")) {
conn->ns_conn->flags |= NSF_FINISHED_SENDING_DATA;
close(conn->endpoint.fd);
conn->endpoint_type = EP_NONE;
}
}
void mg_send_file_data(struct mg_connection *c, int fd) {
struct connection *conn = MG_CONN_2_CONN(c);
conn->endpoint_type = EP_FILE;
conn->endpoint.fd = fd;
ns_set_close_on_exec(conn->endpoint.fd);
}
#endif // MONGOOSE_NO_FILESYSTEM
static void call_request_handler_if_data_is_buffered(struct connection *conn) {
#ifndef MONGOOSE_NO_WEBSOCKET
if (conn->mg_conn.is_websocket) {
do { } while (deliver_websocket_frame(conn));
} else
#endif
if (conn->num_bytes_recv >= (conn->cl + conn->request_len) &&
call_request_handler(conn) == MG_FALSE) {
open_local_endpoint(conn, 1);
}
}
#if !defined(MONGOOSE_NO_DIRECTORY_LISTING) || !defined(MONGOOSE_NO_DAV)
#ifdef _WIN32
struct dirent {
char d_name[MAX_PATH_SIZE];
};
typedef struct DIR {
HANDLE handle;
WIN32_FIND_DATAW info;
struct dirent result;
} DIR;
// Implementation of POSIX opendir/closedir/readdir for Windows.
static DIR *opendir(const char *name) {
DIR *dir = NULL;
wchar_t wpath[MAX_PATH_SIZE];
DWORD attrs;
if (name == NULL) {
SetLastError(ERROR_BAD_ARGUMENTS);
} else if ((dir = (DIR *) NS_MALLOC(sizeof(*dir))) == NULL) {
SetLastError(ERROR_NOT_ENOUGH_MEMORY);
} else {
to_wchar(name, wpath, ARRAY_SIZE(wpath));
attrs = GetFileAttributesW(wpath);
if (attrs != 0xFFFFFFFF &&
((attrs & FILE_ATTRIBUTE_DIRECTORY) == FILE_ATTRIBUTE_DIRECTORY)) {
(void) wcscat(wpath, L"\\*");
dir->handle = FindFirstFileW(wpath, &dir->info);
dir->result.d_name[0] = '\0';
} else {
NS_FREE(dir);
dir = NULL;
}
}
return dir;
}
static int closedir(DIR *dir) {
int result = 0;
if (dir != NULL) {
if (dir->handle != INVALID_HANDLE_VALUE)
result = FindClose(dir->handle) ? 0 : -1;
NS_FREE(dir);
} else {
result = -1;
SetLastError(ERROR_BAD_ARGUMENTS);
}
return result;
}
static struct dirent *readdir(DIR *dir) {
struct dirent *result = 0;
if (dir) {
if (dir->handle != INVALID_HANDLE_VALUE) {
result = &dir->result;
(void) WideCharToMultiByte(CP_UTF8, 0,
dir->info.cFileName, -1, result->d_name,
sizeof(result->d_name), NULL, NULL);
if (!FindNextFileW(dir->handle, &dir->info)) {
(void) FindClose(dir->handle);
dir->handle = INVALID_HANDLE_VALUE;
}
} else {
SetLastError(ERROR_FILE_NOT_FOUND);
}
} else {
SetLastError(ERROR_BAD_ARGUMENTS);
}
return result;
}
#endif // _WIN32 POSIX opendir/closedir/readdir implementation
static int scan_directory(struct connection *conn, const char *dir,
struct dir_entry **arr) {
char path[MAX_PATH_SIZE];
struct dir_entry *p;
struct dirent *dp;
int arr_size = 0, arr_ind = 0, inc = 100;
DIR *dirp;
*arr = NULL;
if ((dirp = (opendir(dir))) == NULL) return 0;
while ((dp = readdir(dirp)) != NULL) {
// Do not show current/parent dirs and hidden files
if (!strcmp(dp->d_name, ".") ||
!strcmp(dp->d_name, "..") ||
must_hide_file(conn, dp->d_name)) {
continue;
}
mg_snprintf(path, sizeof(path), "%s%c%s", dir, '/', dp->d_name);
// Resize the array if necessary
if (arr_ind >= arr_size) {
if ((p = (struct dir_entry *)
NS_REALLOC(*arr, (inc + arr_size) * sizeof(**arr))) != NULL) {
// Memset new chunk to zero, otherwise st_mtime will have garbage which
// can make strftime() segfault, see
// http://code.google.com/p/mongoose/issues/detail?id=79
memset(p + arr_size, 0, sizeof(**arr) * inc);
*arr = p;
arr_size += inc;
}
}
if (arr_ind < arr_size) {
(*arr)[arr_ind].conn = conn;
(*arr)[arr_ind].file_name = strdup(dp->d_name);
stat(path, &(*arr)[arr_ind].st);
arr_ind++;
}
}
closedir(dirp);
return arr_ind;
}
int mg_url_encode(const char *src, size_t s_len, char *dst, size_t dst_len) {
static const char *dont_escape = "._-$,;~()";
static const char *hex = "0123456789abcdef";
size_t i = 0, j = 0;
for (i = j = 0; dst_len > 0 && i < s_len && j + 2 < dst_len - 1; i++, j++) {
if (isalnum(* (const unsigned char *) (src + i)) ||
strchr(dont_escape, * (const unsigned char *) (src + i)) != NULL) {
dst[j] = src[i];
} else if (j + 3 < dst_len) {
dst[j] = '%';
dst[j + 1] = hex[(* (const unsigned char *) (src + i)) >> 4];
dst[j + 2] = hex[(* (const unsigned char *) (src + i)) & 0xf];
j += 2;
}
}
dst[j] = '\0';
return j;
}
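// Example (sketch): mg_url_encode("a b", 3, buf, sizeof(buf)) writes "a%20b"
// into buf and returns 5; alphanumerics and the characters "._-$,;~()" are
// copied through unescaped.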
#endif // !NO_DIRECTORY_LISTING || !MONGOOSE_NO_DAV
#ifndef MONGOOSE_NO_DIRECTORY_LISTING
static void print_dir_entry(const struct dir_entry *de) {
char size[64], mod[64], href[MAX_PATH_SIZE * 3];
int64_t fsize = de->st.st_size;
int is_dir = S_ISDIR(de->st.st_mode);
const char *slash = is_dir ? "/" : "";
if (is_dir) {
mg_snprintf(size, sizeof(size), "%s", "[DIRECTORY]");
} else {
// We use a (signed) cast below because the MSVC 6 compiler cannot
// convert unsigned __int64 to double.
if (fsize < 1024) {
mg_snprintf(size, sizeof(size), "%d", (int) fsize);
} else if (fsize < 0x100000) {
mg_snprintf(size, sizeof(size), "%.1fk", (double) fsize / 1024.0);
} else if (fsize < 0x40000000) {
mg_snprintf(size, sizeof(size), "%.1fM", (double) fsize / 1048576);
} else {
mg_snprintf(size, sizeof(size), "%.1fG", (double) fsize / 1073741824);
}
}
strftime(mod, sizeof(mod), "%d-%b-%Y %H:%M", localtime(&de->st.st_mtime));
mg_url_encode(de->file_name, strlen(de->file_name), href, sizeof(href));
mg_printf_data(&de->conn->mg_conn,
"<tr><td><a href=\"%s%s\">%s%s</a></td>"
"<td> %s</td><td> %s</td></tr>\n",
href, slash, de->file_name, slash, mod, size);
}
// Sort directory entries by size, name, or modification time.
// On Windows, the __cdecl specifier is needed in case the project is built
// with the __stdcall convention, because qsort expects a __cdecl callback.
static int __cdecl compare_dir_entries(const void *p1, const void *p2) {
const struct dir_entry *a = (const struct dir_entry *) p1,
*b = (const struct dir_entry *) p2;
const char *qs = a->conn->mg_conn.query_string ?
a->conn->mg_conn.query_string : "na";
int cmp_result = 0;
if (S_ISDIR(a->st.st_mode) && !S_ISDIR(b->st.st_mode)) {
return -1; // Always put directories on top
} else if (!S_ISDIR(a->st.st_mode) && S_ISDIR(b->st.st_mode)) {
return 1; // Always put directories on top
} else if (*qs == 'n') {
cmp_result = strcmp(a->file_name, b->file_name);
} else if (*qs == 's') {
cmp_result = a->st.st_size == b->st.st_size ? 0 :
a->st.st_size > b->st.st_size ? 1 : -1;
} else if (*qs == 'd') {
cmp_result = a->st.st_mtime == b->st.st_mtime ? 0 :
a->st.st_mtime > b->st.st_mtime ? 1 : -1;
}
return qs[1] == 'd' ? -cmp_result : cmp_result;
}
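// Example (sketch): a listing requested as "?sd" sorts by size descending and
// "?na" by name ascending; the first query-string character picks the column
// (n/s/d) and a 'd' in the second position reverses the order.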
static void send_directory_listing(struct connection *conn, const char *dir) {
struct dir_entry *arr = NULL;
int i, num_entries, sort_direction = conn->mg_conn.query_string != NULL &&
conn->mg_conn.query_string[1] == 'd' ? 'a' : 'd';
mg_send_header(&conn->mg_conn, "Transfer-Encoding", "chunked");
mg_send_header(&conn->mg_conn, "Content-Type", "text/html; charset=utf-8");
mg_printf_data(&conn->mg_conn,
"<html><head><title>Index of %s</title>"
"<style>th {text-align: left;}</style></head>"
"<body><h1>Index of %s</h1><pre><table cellpadding=\"0\">"
"<tr><th><a href=\"?n%c\">Name</a></th>"
"<th><a href=\"?d%c\">Modified</a></th>"
"<th><a href=\"?s%c\">Size</a></th></tr>"
"<tr><td colspan=\"3\"><hr></td></tr>",
conn->mg_conn.uri, conn->mg_conn.uri,
sort_direction, sort_direction, sort_direction);
num_entries = scan_directory(conn, dir, &arr);
qsort(arr, num_entries, sizeof(arr[0]), compare_dir_entries);
for (i = 0; i < num_entries; i++) {
print_dir_entry(&arr[i]);
NS_FREE(arr[i].file_name);
}
NS_FREE(arr);
write_terminating_chunk(conn);
close_local_endpoint(conn);
}
#endif // MONGOOSE_NO_DIRECTORY_LISTING
#ifndef MONGOOSE_NO_DAV
static void print_props(struct connection *conn, const char *uri,
file_stat_t *stp) {
char mtime[64];
gmt_time_string(mtime, sizeof(mtime), &stp->st_mtime);
mg_printf(&conn->mg_conn,
"<d:response>"
"<d:href>%s</d:href>"
"<d:propstat>"
"<d:prop>"
"<d:resourcetype>%s</d:resourcetype>"
"<d:getcontentlength>%" INT64_FMT "</d:getcontentlength>"
"<d:getlastmodified>%s</d:getlastmodified>"
"</d:prop>"
"<d:status>HTTP/1.1 200 OK</d:status>"
"</d:propstat>"
"</d:response>\n",
uri, S_ISDIR(stp->st_mode) ? "<d:collection/>" : "",
(int64_t) stp->st_size, mtime);
}
static void handle_propfind(struct connection *conn, const char *path,
file_stat_t *stp, int exists) {
static const char header[] = "HTTP/1.1 207 Multi-Status\r\n"
"Connection: close\r\n"
"Content-Type: text/xml; charset=utf-8\r\n\r\n"
"<?xml version=\"1.0\" encoding=\"utf-8\"?>"
"<d:multistatus xmlns:d='DAV:'>\n";
static const char footer[] = "</d:multistatus>";
const char *depth = mg_get_header(&conn->mg_conn, "Depth");
#ifdef MONGOOSE_NO_DIRECTORY_LISTING
const char *list_dir = "no";
#else
const char *list_dir = conn->server->config_options[ENABLE_DIRECTORY_LISTING];
#endif
conn->mg_conn.status_code = 207;
// Print properties for the requested resource itself
if (!exists) {
conn->mg_conn.status_code = 404;
mg_printf(&conn->mg_conn, "%s", "HTTP/1.1 404 Not Found\r\n\r\n");
} else if (S_ISDIR(stp->st_mode) && mg_strcasecmp(list_dir, "yes") != 0) {
conn->mg_conn.status_code = 403;
mg_printf(&conn->mg_conn, "%s",
"HTTP/1.1 403 Directory Listing Denied\r\n\r\n");
} else {
ns_send(conn->ns_conn, header, sizeof(header) - 1);
print_props(conn, conn->mg_conn.uri, stp);
if (S_ISDIR(stp->st_mode) &&
(depth == NULL || strcmp(depth, "0") != 0)) {
struct dir_entry *arr = NULL;
int i, num_entries = scan_directory(conn, path, &arr);
for (i = 0; i < num_entries; i++) {
char buf[MAX_PATH_SIZE * 3];
struct dir_entry *de = &arr[i];
mg_url_encode(de->file_name, strlen(de->file_name), buf, sizeof(buf));
print_props(conn, buf, &de->st);
NS_FREE(de->file_name);
}
NS_FREE(arr);
}
ns_send(conn->ns_conn, footer, sizeof(footer) - 1);
}
close_local_endpoint(conn);
}
static void handle_mkcol(struct connection *conn, const char *path) {
int status_code = 500;
if (conn->mg_conn.content_len > 0) {
status_code = 415;
} else if (!mkdir(path, 0755)) {
status_code = 201;
} else if (errno == EEXIST) {
status_code = 405;
} else if (errno == EACCES) {
status_code = 403;
} else if (errno == ENOENT) {
status_code = 409;
}
send_http_error(conn, status_code, NULL);
}
static int remove_directory(const char *dir) {
char path[MAX_PATH_SIZE];
struct dirent *dp;
file_stat_t st;
DIR *dirp;
if ((dirp = opendir(dir)) == NULL) return 0;
while ((dp = readdir(dirp)) != NULL) {
if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, "..")) continue;
mg_snprintf(path, sizeof(path), "%s%c%s", dir, '/', dp->d_name);
stat(path, &st);
if (S_ISDIR(st.st_mode)) {
remove_directory(path);
} else {
remove(path);
}
}
closedir(dirp);
rmdir(dir);
return 1;
}
static void handle_delete(struct connection *conn, const char *path) {
file_stat_t st;
if (stat(path, &st) != 0) {
send_http_error(conn, 404, NULL);
} else if (S_ISDIR(st.st_mode)) {
remove_directory(path);
send_http_error(conn, 204, NULL);
} else if (remove(path) == 0) {
send_http_error(conn, 204, NULL);
} else {
send_http_error(conn, 423, NULL);
}
}
// For a given PUT path, create all intermediate subdirectories.
// Return 0 if the path itself is a directory, -1 on error, or 1 if OK.
static int put_dir(const char *path) {
char buf[MAX_PATH_SIZE];
const char *s, *p;
file_stat_t st;
// Create intermediate directories if they do not exist
for (s = p = path + 1; (p = strchr(s, '/')) != NULL; s = ++p) {
if (p - path >= (int) sizeof(buf)) return -1; // Buffer overflow
memcpy(buf, path, p - path);
buf[p - path] = '\0';
if (stat(buf, &st) != 0 && mkdir(buf, 0755) != 0) return -1;
if (p[1] == '\0') return 0; // Path is a directory itself
}
return 1;
}
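// Example (sketch): put_dir("/a/b/c.txt") creates /a and /a/b if missing and
// returns 1; put_dir("/a/b/") returns 0 because the path names a directory.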
static void handle_put(struct connection *conn, const char *path) {
file_stat_t st;
const char *range, *cl_hdr = mg_get_header(&conn->mg_conn, "Content-Length");
int64_t r1, r2;
int rc;
conn->mg_conn.status_code = !stat(path, &st) ? 200 : 201;
if ((rc = put_dir(path)) == 0) {
mg_printf(&conn->mg_conn, "HTTP/1.1 %d OK\r\n\r\n",
conn->mg_conn.status_code);
close_local_endpoint(conn);
} else if (rc == -1) {
send_http_error(conn, 500, "put_dir: %s", strerror(errno));
} else if (cl_hdr == NULL) {
send_http_error(conn, 411, NULL);
} else if ((conn->endpoint.fd =
open(path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0644)) < 0) {
send_http_error(conn, 500, "open(%s): %s", path, strerror(errno));
} else {
DBG(("PUT [%s] %lu", path, (unsigned long) conn->ns_conn->recv_iobuf.len));
conn->endpoint_type = EP_PUT;
ns_set_close_on_exec(conn->endpoint.fd);
range = mg_get_header(&conn->mg_conn, "Content-Range");
conn->cl = to64(cl_hdr);
r1 = r2 = 0;
if (range != NULL && parse_range_header(range, &r1, &r2) > 0) {
conn->mg_conn.status_code = 206;
lseek(conn->endpoint.fd, r1, SEEK_SET);
conn->cl = r2 > r1 ? r2 - r1 + 1: conn->cl - r1;
}
mg_printf(&conn->mg_conn, "HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n",
conn->mg_conn.status_code);
}
}
static void forward_put_data(struct connection *conn) {
struct iobuf *io = &conn->ns_conn->recv_iobuf;
size_t k = conn->cl < (int64_t) io->len ? conn->cl : (int64_t) io->len; // To write
int n = write(conn->endpoint.fd, io->buf, k); // Write them!
if (n > 0) {
iobuf_remove(io, n);
conn->cl -= n;
}
if (conn->cl <= 0) {
close_local_endpoint(conn);
}
}
#endif // MONGOOSE_NO_DAV
static void send_options(struct connection *conn) {
conn->mg_conn.status_code = 200;
mg_printf(&conn->mg_conn, "%s",
"HTTP/1.1 200 OK\r\nAllow: GET, POST, HEAD, CONNECT, PUT, "
"DELETE, OPTIONS, PROPFIND, MKCOL\r\nDAV: 1\r\n\r\n");
close_local_endpoint(conn);
}
#ifndef MONGOOSE_NO_AUTH
void mg_send_digest_auth_request(struct mg_connection *c) {
struct connection *conn = MG_CONN_2_CONN(c);
c->status_code = 401;
mg_printf(c,
"HTTP/1.1 401 Unauthorized\r\n"
"WWW-Authenticate: Digest qop=\"auth\", "
"realm=\"%s\", nonce=\"%lu\"\r\n\r\n",
conn->server->config_options[AUTH_DOMAIN],
(unsigned long) time(NULL));
close_local_endpoint(conn);
}
// Use the global passwords file, if specified by the global auth file option,
// or search for .htpasswd in the requested directory.
static FILE *open_auth_file(struct connection *conn, const char *path,
int is_directory) {
char name[MAX_PATH_SIZE];
const char *p, *gpass = conn->server->config_options[GLOBAL_AUTH_FILE];
FILE *fp = NULL;
if (gpass != NULL) {
// Use global passwords file
fp = fopen(gpass, "r");
} else if (is_directory) {
mg_snprintf(name, sizeof(name), "%s%c%s", path, '/', PASSWORDS_FILE_NAME);
fp = fopen(name, "r");
} else {
// Try to find .htpasswd in the directory of the requested file.
if ((p = strrchr(path, '/')) == NULL) p = path;
mg_snprintf(name, sizeof(name), "%.*s%c%s",
(int) (p - path), path, '/', PASSWORDS_FILE_NAME);
fp = fopen(name, "r");
}
return fp;
}
#if !defined(HAVE_MD5) && !defined(MONGOOSE_NO_AUTH)
/*
* This code implements the MD5 message-digest algorithm.
* The algorithm is due to Ron Rivest. This code was
* written by Colin Plumb in 1993, no copyright is claimed.
* This code is in the public domain; do with it what you wish.
*
* Equivalent code is available from RSA Data Security, Inc.
* This code has been tested against that, and is equivalent,
* except that you don't need to include two pages of legalese
* with every copy.
*
* To compute the message digest of a chunk of bytes, declare an
* MD5Context structure, pass it to MD5Init, call MD5Update as
* needed on buffers full of bytes, and then call MD5Final, which
* will fill a supplied 16-byte array with the digest.
*/
typedef struct MD5Context {
uint32_t buf[4];
uint32_t bits[2];
unsigned char in[64];
} MD5_CTX;
static void byteReverse(unsigned char *buf, unsigned longs) {
uint32_t t;
// Forrest: MD5 expects LITTLE_ENDIAN input, swap if BIG_ENDIAN
if (is_big_endian()) {
do {
t = (uint32_t) ((unsigned) buf[3] << 8 | buf[2]) << 16 |
((unsigned) buf[1] << 8 | buf[0]);
* (uint32_t *) buf = t;
buf += 4;
} while (--longs);
}
}
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))
#define MD5STEP(f, w, x, y, z, data, s) \
( w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x )
// Start MD5 accumulation. Set bit count to 0 and buffer to mysterious
// initialization constants.
static void MD5Init(MD5_CTX *ctx) {
ctx->buf[0] = 0x67452301;
ctx->buf[1] = 0xefcdab89;
ctx->buf[2] = 0x98badcfe;
ctx->buf[3] = 0x10325476;
ctx->bits[0] = 0;
ctx->bits[1] = 0;
}
static void MD5Transform(uint32_t buf[4], uint32_t const in[16]) {
register uint32_t a, b, c, d;
a = buf[0];
b = buf[1];
c = buf[2];
d = buf[3];
MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
buf[0] += a;
buf[1] += b;
buf[2] += c;
buf[3] += d;
}
static void MD5Update(MD5_CTX *ctx, unsigned char const *buf, unsigned len) {
uint32_t t;
t = ctx->bits[0];
if ((ctx->bits[0] = t + ((uint32_t) len << 3)) < t)
ctx->bits[1]++;
ctx->bits[1] += len >> 29;
t = (t >> 3) & 0x3f;
if (t) {
unsigned char *p = (unsigned char *) ctx->in + t;
t = 64 - t;
if (len < t) {
memcpy(p, buf, len);
return;
}
memcpy(p, buf, t);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, (uint32_t *) ctx->in);
buf += t;
len -= t;
}
while (len >= 64) {
memcpy(ctx->in, buf, 64);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, (uint32_t *) ctx->in);
buf += 64;
len -= 64;
}
memcpy(ctx->in, buf, len);
}
static void MD5Final(unsigned char digest[16], MD5_CTX *ctx) {
unsigned count;
unsigned char *p;
uint32_t *a;
count = (ctx->bits[0] >> 3) & 0x3F;
p = ctx->in + count;
*p++ = 0x80;
count = 64 - 1 - count;
if (count < 8) {
memset(p, 0, count);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, (uint32_t *) ctx->in);
memset(ctx->in, 0, 56);
} else {
memset(p, 0, count - 8);
}
byteReverse(ctx->in, 14);
a = (uint32_t *)ctx->in;
a[14] = ctx->bits[0];
a[15] = ctx->bits[1];
MD5Transform(ctx->buf, (uint32_t *) ctx->in);
byteReverse((unsigned char *) ctx->buf, 4);
memcpy(digest, ctx->buf, 16);
memset((char *) ctx, 0, sizeof(*ctx));
}
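// Usage sketch (illustrative only, not called by the library): this follows
// the MD5Init/MD5Update/MD5Final sequence described in the comment above.
// The helper name md5_example is hypothetical.
#if 0
static void md5_example(const unsigned char *data, unsigned len,
unsigned char digest[16]) {
MD5_CTX ctx;
MD5Init(&ctx);              // Set the standard initialization constants
MD5Update(&ctx, data, len); // May be called repeatedly for streamed input
MD5Final(digest, &ctx);     // Pads, appends the bit count, emits 16 bytes
}
#endif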
#endif // !HAVE_MD5
// Stringify binary data. Output buffer must hold at least 2 * len + 1 bytes,
// because each input byte becomes two hex characters, plus a terminating NUL.
static void bin2str(char *to, const unsigned char *p, size_t len) {
static const char *hex = "0123456789abcdef";
for (; len--; p++) {
*to++ = hex[p[0] >> 4];
*to++ = hex[p[0] & 0x0f];
}
*to = '\0';
}
// Return stringified MD5 hash for list of strings. Buffer must be 33 bytes.
char *mg_md5(char buf[33], ...) {
unsigned char hash[16];
const char *p;
va_list ap;
MD5_CTX ctx;
MD5Init(&ctx);
va_start(ap, buf);
while ((p = va_arg(ap, const char *)) != NULL) {
MD5Update(&ctx, (const unsigned char *) p, (unsigned) strlen(p));
}
va_end(ap);
MD5Final(hash, &ctx);
bin2str(buf, hash, sizeof(hash));
return buf;
}
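// Example (sketch): the vararg list must be NULL-terminated. Computing the
// HA1 value used by HTTP digest auth (RFC 2617), where user, realm and
// password are placeholder variables for illustration:
//   char ha1[33];
//   mg_md5(ha1, user, ":", realm, ":", password, NULL);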
// Check the user's password, return 1 if OK
static int check_password(const char *method, const char *ha1, const char *uri,
const char *nonce, const char *nc, const char *cnonce,
const char *qop, const char *response) {
char ha2[32 + 1], expected_response[32 + 1];
#if 0
// Check for authentication timeout
if ((unsigned long) time(NULL) - (unsigned long) to64(nonce) > 3600 * 2) {
return 0;
}
#endif
mg_md5(ha2, method, ":", uri, NULL);
mg_md5(expected_response, ha1, ":", nonce, ":", nc,
":", cnonce, ":", qop, ":", ha2, NULL);
return mg_strcasecmp(response, expected_response) == 0 ? MG_TRUE : MG_FALSE;
}
// Authorize against the opened passwords file. Return 1 if authorized.
int mg_authorize_digest(struct mg_connection *c, FILE *fp) {
struct connection *conn = MG_CONN_2_CONN(c);
const char *hdr;
char line[256], f_user[256], ha1[256], f_domain[256], user[100], nonce[100],
uri[MAX_REQUEST_SIZE], cnonce[100], resp[100], qop[100], nc[100];
if (c == NULL || fp == NULL) return 0;
if ((hdr = mg_get_header(c, "Authorization")) == NULL ||
mg_strncasecmp(hdr, "Digest ", 7) != 0) return 0;
if (!mg_parse_header(hdr, "username", user, sizeof(user))) return 0;
if (!mg_parse_header(hdr, "cnonce", cnonce, sizeof(cnonce))) return 0;
if (!mg_parse_header(hdr, "response", resp, sizeof(resp))) return 0;
if (!mg_parse_header(hdr, "uri", uri, sizeof(uri))) return 0;
if (!mg_parse_header(hdr, "qop", qop, sizeof(qop))) return 0;
if (!mg_parse_header(hdr, "nc", nc, sizeof(nc))) return 0;
if (!mg_parse_header(hdr, "nonce", nonce, sizeof(nonce))) return 0;
while (fgets(line, sizeof(line), fp) != NULL) {
if (sscanf(line, "%[^:]:%[^:]:%s", f_user, f_domain, ha1) == 3 &&
!strcmp(user, f_user) &&
// NOTE(lsm): due to a bug in MSIE, we do not compare URIs
!strcmp(conn->server->config_options[AUTH_DOMAIN], f_domain))
return check_password(c->request_method, ha1, uri,
nonce, nc, cnonce, qop, resp);
}
return MG_FALSE;
}
// Return 1 if request is authorized, 0 otherwise.
static int is_authorized(struct connection *conn, const char *path,
int is_directory) {
FILE *fp;
int authorized = MG_TRUE;
if ((fp = open_auth_file(conn, path, is_directory)) != NULL) {
authorized = mg_authorize_digest(&conn->mg_conn, fp);
fclose(fp);
}
return authorized;
}
static int is_authorized_for_dav(struct connection *conn) {
const char *auth_file = conn->server->config_options[DAV_AUTH_FILE];
const char *method = conn->mg_conn.request_method;
FILE *fp;
int authorized = MG_FALSE;
// If dav_auth_file is not set, allow non-authorized PROPFIND
if (method != NULL && !strcmp(method, "PROPFIND") && auth_file == NULL) {
authorized = MG_TRUE;
} else if (auth_file != NULL && (fp = fopen(auth_file, "r")) != NULL) {
authorized = mg_authorize_digest(&conn->mg_conn, fp);
fclose(fp);
}
return authorized;
}
static int is_dav_request(const struct connection *conn) {
const char *s = conn->mg_conn.request_method;
return !strcmp(s, "PUT") || !strcmp(s, "DELETE") ||
!strcmp(s, "MKCOL") || !strcmp(s, "PROPFIND");
}
#endif // MONGOOSE_NO_AUTH
static int parse_header(const char *str, int str_len, const char *var_name,
char *buf, size_t buf_size) {
int ch = ' ', ch1 = ',', len = 0, n = strlen(var_name);
const char *p, *end = str + str_len, *s = NULL;
if (buf != NULL && buf_size > 0) buf[0] = '\0';
// Find where variable starts
for (s = str; s != NULL && s + n < end; s++) {
if ((s == str || s[-1] == ch || s[-1] == ch1) && s[n] == '=' &&
!memcmp(s, var_name, n)) break;
}
if (s != NULL && &s[n + 1] < end) {
s += n + 1;
if (*s == '"' || *s == '\'') ch = ch1 = *s++;
p = s;
while (p < end && p[0] != ch && p[0] != ch1 && len < (int) buf_size) {
if (ch == ch1 && p[0] == '\\' && p[1] == ch) p++;
buf[len++] = *p++;
}
if (len >= (int) buf_size || (ch != ' ' && *p != ch)) {
len = 0;
} else {
if (len > 0 && s[len - 1] == ',') len--;
if (len > 0 && s[len - 1] == ';') len--;
buf[len] = '\0';
}
}
return len;
}
int mg_parse_header(const char *s, const char *var_name, char *buf,
size_t buf_size) {
return parse_header(s, s == NULL ? 0 : strlen(s), var_name, buf, buf_size);
}
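// Example (sketch): with s = "Digest username=\"joe\", nonce=\"123\"",
// mg_parse_header(s, "username", buf, sizeof(buf)) copies "joe" into buf and
// returns 3 (the value length); unquoted values are also accepted.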
#ifndef MONGOOSE_NO_SSI
static void send_ssi_file(struct mg_connection *, const char *, FILE *, int);
static void send_file_data(struct mg_connection *conn, FILE *fp) {
char buf[IOBUF_SIZE];
int n;
while ((n = fread(buf, 1, sizeof(buf), fp)) > 0) {
mg_write(conn, buf, n);
}
}
static void do_ssi_include(struct mg_connection *conn, const char *ssi,
char *tag, int include_level) {
char file_name[IOBUF_SIZE], path[MAX_PATH_SIZE], *p;
char **opts = (MG_CONN_2_CONN(conn))->server->config_options;
FILE *fp;
// sscanf() is safe here, since send_ssi_file() reads the tag into a buffer
// of size IOBUF_SIZE, matching file_name above, so strlen(tag) < IOBUF_SIZE.
if (sscanf(tag, " virtual=\"%[^\"]\"", file_name) == 1) {
// File name is relative to the webserver root
mg_snprintf(path, sizeof(path), "%s%c%s",
opts[DOCUMENT_ROOT], '/', file_name);
} else if (sscanf(tag, " abspath=\"%[^\"]\"", file_name) == 1) {
// File name is relative to the webserver working directory
// or it is absolute system path
mg_snprintf(path, sizeof(path), "%s", file_name);
} else if (sscanf(tag, " file=\"%[^\"]\"", file_name) == 1 ||
sscanf(tag, " \"%[^\"]\"", file_name) == 1) {
// File name is relative to the current document
mg_snprintf(path, sizeof(path), "%s", ssi);
if ((p = strrchr(path, '/')) != NULL) {
p[1] = '\0';
}
mg_snprintf(path + strlen(path), sizeof(path) - strlen(path), "%s",
file_name);
} else {
mg_printf(conn, "Bad SSI #include: [%s]", tag);
return;
}
if ((fp = fopen(path, "rb")) == NULL) {
mg_printf(conn, "Cannot open SSI #include: [%s]: fopen(%s): %s",
tag, path, strerror(errno));
} else {
ns_set_close_on_exec(fileno(fp));
if (mg_match_prefix(opts[SSI_PATTERN], strlen(opts[SSI_PATTERN]),
path) > 0) {
send_ssi_file(conn, path, fp, include_level + 1);
} else {
send_file_data(conn, fp);
}
fclose(fp);
}
}
#ifndef MONGOOSE_NO_POPEN
static void do_ssi_exec(struct mg_connection *conn, char *tag) {
char cmd[IOBUF_SIZE];
FILE *fp;
if (sscanf(tag, " \"%[^\"]\"", cmd) != 1) {
mg_printf(conn, "Bad SSI #exec: [%s]", tag);
} else if ((fp = popen(cmd, "r")) == NULL) {
mg_printf(conn, "Cannot SSI #exec: [%s]: %s", cmd, strerror(errno));
} else {
send_file_data(conn, fp);
pclose(fp);
}
}
#endif // !MONGOOSE_NO_POPEN
static void send_ssi_file(struct mg_connection *conn, const char *path,
FILE *fp, int include_level) {
char buf[IOBUF_SIZE];
int ch, offset, len, in_ssi_tag;
if (include_level > 10) {
mg_printf(conn, "SSI #include level is too deep (%s)", path);
return;
}
in_ssi_tag = len = offset = 0;
while ((ch = fgetc(fp)) != EOF) {
if (in_ssi_tag && ch == '>') {
in_ssi_tag = 0;
buf[len++] = (char) ch;
buf[len] = '\0';
assert(len <= (int) sizeof(buf));
if (len < 6 || memcmp(buf, "<!--#", 5) != 0) {
// Not an SSI tag, pass it
(void) mg_write(conn, buf, (size_t) len);
} else {
if (!memcmp(buf + 5, "include", 7)) {
do_ssi_include(conn, path, buf + 12, include_level);
#if !defined(MONGOOSE_NO_POPEN)
} else if (!memcmp(buf + 5, "exec", 4)) {
do_ssi_exec(conn, buf + 9);
#endif // !NO_POPEN
} else {
mg_printf(conn, "%s: unknown SSI " "command: \"%s\"", path, buf);
}
}
len = 0;
} else if (in_ssi_tag) {
if (len == 5 && memcmp(buf, "<!--#", 5) != 0) {
// Not an SSI tag
in_ssi_tag = 0;
} else if (len == (int) sizeof(buf) - 2) {
mg_printf(conn, "%s: SSI tag is too large", path);
len = 0;
}
buf[len++] = ch & 0xff;
} else if (ch == '<') {
in_ssi_tag = 1;
if (len > 0) {
mg_write(conn, buf, (size_t) len);
}
len = 0;
buf[len++] = ch & 0xff;
} else {
buf[len++] = ch & 0xff;
if (len == (int) sizeof(buf)) {
mg_write(conn, buf, (size_t) len);
len = 0;
}
}
}
// Send the rest of buffered data
if (len > 0) {
mg_write(conn, buf, (size_t) len);
}
}
static void handle_ssi_request(struct connection *conn, const char *path) {
FILE *fp;
struct vec mime_vec;
if ((fp = fopen(path, "rb")) == NULL) {
send_http_error(conn, 500, "fopen(%s): %s", path, strerror(errno));
} else {
ns_set_close_on_exec(fileno(fp));
get_mime_type(conn->server, path, &mime_vec);
conn->mg_conn.status_code = 200;
mg_printf(&conn->mg_conn,
"HTTP/1.1 %d OK\r\n"
"Content-Type: %.*s\r\n"
"Connection: close\r\n\r\n",
conn->mg_conn.status_code, (int) mime_vec.len, mime_vec.ptr);
send_ssi_file(&conn->mg_conn, path, fp, 0);
fclose(fp);
close_local_endpoint(conn);
}
}
#endif
static void proxy_request(struct ns_connection *pc, struct mg_connection *c) {
int i, sent_close_header = 0;
ns_printf(pc, "%s %s%s%s HTTP/%s\r\n", c->request_method, c->uri,
c->query_string ? "?" : "",
c->query_string ? c->query_string : "",
c->http_version);
for (i = 0; i < c->num_headers; i++) {
if (mg_strcasecmp(c->http_headers[i].name, "Connection") == 0) {
// Force connection close, because we don't parse proxy replies
// and therefore don't know message boundaries
ns_printf(pc, "%s: %s\r\n", "Connection", "close");
sent_close_header = 1;
} else {
ns_printf(pc, "%s: %s\r\n", c->http_headers[i].name,
c->http_headers[i].value);
}
}
if (!sent_close_header) {
ns_printf(pc, "%s: %s\r\n", "Connection", "close");
}
ns_printf(pc, "%s", "\r\n");
ns_send(pc, c->content, c->content_len);
}
#ifdef NS_ENABLE_SSL
int mg_terminate_ssl(struct mg_connection *c, const char *cert) {
static const char ok[] = "HTTP/1.0 200 OK\r\n\r\n";
struct connection *conn = MG_CONN_2_CONN(c);
SSL_CTX *ctx;
DBG(("%p MITM", conn));
if ((ctx = SSL_CTX_new(SSLv23_server_method())) == NULL) return 0;
SSL_CTX_use_certificate_file(ctx, cert, 1);
SSL_CTX_use_PrivateKey_file(ctx, cert, 1);
SSL_CTX_use_certificate_chain_file(ctx, cert);
// When clear-text reply is pushed to client, switch to SSL mode.
// TODO(lsm): check for send() failure
send(conn->ns_conn->sock, ok, sizeof(ok) - 1, 0);
//DBG(("%p %lu %d SEND", c, (unsigned long) sizeof(ok) - 1, n));
conn->ns_conn->send_iobuf.len = 0;
conn->endpoint_type = EP_USER; // To keep-alive in close_local_endpoint()
close_local_endpoint(conn); // Clean up current CONNECT request
if ((conn->ns_conn->ssl = SSL_new(ctx)) != NULL) {
SSL_set_fd(conn->ns_conn->ssl, conn->ns_conn->sock);
}
SSL_CTX_free(ctx);
return 1;
}
#endif
int mg_forward(struct mg_connection *c, const char *addr) {
static const char ok[] = "HTTP/1.1 200 OK\r\n\r\n";
struct connection *conn = MG_CONN_2_CONN(c);
struct ns_connection *pc;
if ((pc = ns_connect(&conn->server->ns_mgr, addr,
mg_ev_handler, conn)) == NULL) {
conn->ns_conn->flags |= NSF_CLOSE_IMMEDIATELY;
return 0;
}
// Interlink two connections
pc->flags |= MG_PROXY_CONN;
conn->endpoint_type = EP_PROXY;
conn->endpoint.nc = pc;
DBG(("%p [%s] [%s] -> %p %p", conn, c->uri, addr, pc, conn->ns_conn->ssl));
if (strcmp(c->request_method, "CONNECT") == 0) {
// For CONNECT request, reply with 200 OK. Tunnel is established.
// TODO(lsm): check for send() failure
(void) send(conn->ns_conn->sock, ok, sizeof(ok) - 1, 0);
} else {
// Strip "http://host:port" part from the URI
if (memcmp(c->uri, "http://", 7) == 0) c->uri += 7;
while (*c->uri != '\0' && *c->uri != '/') c->uri++;
proxy_request(pc, c);
}
return 1;
}
static void proxify_connection(struct connection *conn) {
char proto[10], host[500], cert[500], addr[1000];
unsigned short port = 80;
struct mg_connection *c = &conn->mg_conn;
int n = 0;
const char *url = c->uri;
proto[0] = host[0] = cert[0] = '\0';
if (sscanf(url, "%499[^: ]:%hu%n", host, &port, &n) != 2 &&
sscanf(url, "%9[a-z]://%499[^: ]:%hu%n", proto, host, &port, &n) != 3 &&
sscanf(url, "%9[a-z]://%499[^/ ]%n", proto, host, &n) != 2) {
n = 0;
}
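// Illustration (sketch): "http://example.com:8080/x" matches the second
// pattern (proto="http", host="example.com", port=8080), while a CONNECT
// target such as "example.com:443" matches the first one.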
snprintf(addr, sizeof(addr), "%s://%s:%hu",
conn->ns_conn->ssl != NULL ? "ssl" : "tcp", host, port);
if (n <= 0 || !mg_forward(c, addr)) {
conn->ns_conn->flags |= NSF_CLOSE_IMMEDIATELY;
}
}
#ifndef MONGOOSE_NO_FILESYSTEM
void mg_send_file_internal(struct mg_connection *c, const char *file_name,
file_stat_t *st, int exists,
const char *extra_headers) {
struct connection *conn = MG_CONN_2_CONN(c);
char path[MAX_PATH_SIZE];
const int is_directory = S_ISDIR(st->st_mode);
#ifndef MONGOOSE_NO_CGI
const char *cgi_pat = conn->server->config_options[CGI_PATTERN];
#else
const char *cgi_pat = DEFAULT_CGI_PATTERN;
#endif
#ifndef MONGOOSE_NO_DIRECTORY_LISTING
const char *dir_lst = conn->server->config_options[ENABLE_DIRECTORY_LISTING];
#else
const char *dir_lst = "yes";
#endif
mg_snprintf(path, sizeof(path), "%s", file_name);
if (!exists || must_hide_file(conn, path)) {
send_http_error(conn, 404, NULL);
} else if (is_directory &&
conn->mg_conn.uri[strlen(conn->mg_conn.uri) - 1] != '/') {
conn->mg_conn.status_code = 301;
mg_printf(&conn->mg_conn, "HTTP/1.1 301 Moved Permanently\r\n"
"Location: %s/\r\n\r\n", conn->mg_conn.uri);
close_local_endpoint(conn);
} else if (is_directory && !find_index_file(conn, path, sizeof(path), st)) {
if (!mg_strcasecmp(dir_lst, "yes")) {
#ifndef MONGOOSE_NO_DIRECTORY_LISTING
send_directory_listing(conn, path);
#else
send_http_error(conn, 501, NULL);
#endif
} else {
send_http_error(conn, 403, NULL);
}
} else if (mg_match_prefix(cgi_pat, strlen(cgi_pat), path) > 0) {
#if !defined(MONGOOSE_NO_CGI)
open_cgi_endpoint(conn, path);
#else
send_http_error(conn, 501, NULL);
#endif // !MONGOOSE_NO_CGI
#ifndef MONGOOSE_NO_SSI
} else if (mg_match_prefix(conn->server->config_options[SSI_PATTERN],
strlen(conn->server->config_options[SSI_PATTERN]),
path) > 0) {
handle_ssi_request(conn, path);
#endif
} else if (is_not_modified(conn, st)) {
send_http_error(conn, 304, NULL);
} else if ((conn->endpoint.fd = open(path, O_RDONLY | O_BINARY, 0)) != -1) {
// O_BINARY is required for Windows, otherwise in default text mode
// two bytes \r\n will be read as one.
open_file_endpoint(conn, path, st, extra_headers);
} else {
send_http_error(conn, 404, NULL);
}
}
void mg_send_file(struct mg_connection *c, const char *file_name,
const char *extra_headers) {
file_stat_t st;
const int exists = stat(file_name, &st) == 0;
mg_send_file_internal(c, file_name, &st, exists, extra_headers);
}
#endif // !MONGOOSE_NO_FILESYSTEM
static void open_local_endpoint(struct connection *conn, int skip_user) {
#ifndef MONGOOSE_NO_FILESYSTEM
char path[MAX_PATH_SIZE];
file_stat_t st;
int exists = 0;
#endif
// If EP_USER was set in a prev call, reset it
conn->endpoint_type = EP_NONE;
#ifndef MONGOOSE_NO_AUTH
if (conn->server->event_handler && call_user(conn, MG_AUTH) == MG_FALSE) {
mg_send_digest_auth_request(&conn->mg_conn);
return;
}
#endif
// Call URI handler if one is registered for this URI
if (skip_user == 0 && conn->server->event_handler != NULL) {
conn->endpoint_type = EP_USER;
#if MONGOOSE_POST_SIZE_LIMIT > 1
{
const char *cl = mg_get_header(&conn->mg_conn, "Content-Length");
if ((strcmp(conn->mg_conn.request_method, "POST") == 0 ||
strcmp(conn->mg_conn.request_method, "PUT") == 0) &&
(cl == NULL || to64(cl) > MONGOOSE_POST_SIZE_LIMIT)) {
send_http_error(conn, 500, "POST size > %lu",
(unsigned long) MONGOOSE_POST_SIZE_LIMIT);
}
}
#endif
return;
}
if (strcmp(conn->mg_conn.request_method, "CONNECT") == 0 ||
mg_strncasecmp(conn->mg_conn.uri, "http", 4) == 0) {
const char *enp = conn->server->config_options[ENABLE_PROXY];
if (enp == NULL || strcmp(enp, "yes") != 0) {
send_http_error(conn, 405, NULL);
} else {
proxify_connection(conn);
}
return;
}
if (!strcmp(conn->mg_conn.request_method, "OPTIONS")) {
send_options(conn);
return;
}
#ifdef MONGOOSE_NO_FILESYSTEM
send_http_error(conn, 404, NULL);
#else
exists = convert_uri_to_file_name(conn, path, sizeof(path), &st);
if (!strcmp(conn->mg_conn.request_method, "OPTIONS")) {
send_options(conn);
} else if (conn->server->config_options[DOCUMENT_ROOT] == NULL) {
send_http_error(conn, 404, NULL);
#ifndef MONGOOSE_NO_AUTH
} else if ((!is_dav_request(conn) && !is_authorized(conn, path,
exists && S_ISDIR(st.st_mode))) ||
(is_dav_request(conn) && !is_authorized_for_dav(conn))) {
mg_send_digest_auth_request(&conn->mg_conn);
close_local_endpoint(conn);
#endif
#ifndef MONGOOSE_NO_DAV
} else if (must_hide_file(conn, path)) {
send_http_error(conn, 404, NULL);
} else if (!strcmp(conn->mg_conn.request_method, "PROPFIND")) {
handle_propfind(conn, path, &st, exists);
} else if (!strcmp(conn->mg_conn.request_method, "MKCOL")) {
handle_mkcol(conn, path);
} else if (!strcmp(conn->mg_conn.request_method, "DELETE")) {
handle_delete(conn, path);
} else if (!strcmp(conn->mg_conn.request_method, "PUT")) {
handle_put(conn, path);
#endif
} else {
mg_send_file_internal(&conn->mg_conn, path, &st, exists, NULL);
}
#endif // MONGOOSE_NO_FILESYSTEM
}
static void send_continue_if_expected(struct connection *conn) {
static const char expect_response[] = "HTTP/1.1 100 Continue\r\n\r\n";
const char *expect_hdr = mg_get_header(&conn->mg_conn, "Expect");
if (expect_hdr != NULL && !mg_strcasecmp(expect_hdr, "100-continue")) {
ns_send(conn->ns_conn, expect_response, sizeof(expect_response) - 1);
}
}
// Conform to http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
static int is_valid_uri(const char *uri) {
unsigned short n;
return uri[0] == '/' ||
strcmp(uri, "*") == 0 || // OPTIONS method can use asterisk URI
mg_strncasecmp(uri, "http", 4) == 0 || // Naive check for the absolute URI
sscanf(uri, "%*[^ :]:%hu", &n) > 0; // CONNECT method can use host:port
}
static void try_parse(struct connection *conn) {
struct iobuf *io = &conn->ns_conn->recv_iobuf;
if (conn->request_len == 0 &&
(conn->request_len = get_request_len(io->buf, io->len)) > 0) {
// If request is buffered in, remove it from the iobuf. This is because
// iobuf could be reallocated, and pointers in parsed request could
// become invalid.
conn->request = (char *) NS_MALLOC(conn->request_len);
memcpy(conn->request, io->buf, conn->request_len);
//DBG(("%p [%.*s]", conn, conn->request_len, conn->request));
iobuf_remove(io, conn->request_len);
conn->request_len = parse_http_message(conn->request, conn->request_len,
&conn->mg_conn);
if (conn->request_len > 0) {
const char *cl_hdr = mg_get_header(&conn->mg_conn, "Content-Length");
conn->cl = cl_hdr == NULL ? 0 : to64(cl_hdr);
conn->mg_conn.content_len = (size_t) conn->cl;
}
}
}
static void do_proxy(struct connection *conn) {
if (0 && conn->request_len == 0) {
try_parse(conn);
DBG(("%p parsing -> %d", conn, conn->request_len));
if (conn->request_len > 0 && call_user(conn, MG_REQUEST) == MG_FALSE) {
proxy_request(conn->endpoint.nc, &conn->mg_conn);
} else if (conn->request_len < 0) {
ns_forward(conn->ns_conn, conn->endpoint.nc);
}
} else {
DBG(("%p forwarding", conn));
ns_forward(conn->ns_conn, conn->endpoint.nc);
}
}
static void on_recv_data(struct connection *conn) {
struct iobuf *io = &conn->ns_conn->recv_iobuf;
int n;
if (conn->endpoint_type == EP_PROXY) {
if (conn->endpoint.nc != NULL) do_proxy(conn);
return;
}
try_parse(conn);
DBG(("%p %d %lu %d", conn, conn->request_len, (unsigned long)io->len,
conn->ns_conn->flags));
if (conn->request_len < 0 ||
(conn->request_len > 0 && !is_valid_uri(conn->mg_conn.uri))) {
send_http_error(conn, 400, NULL);
} else if (conn->request_len == 0 && io->len > MAX_REQUEST_SIZE) {
send_http_error(conn, 413, NULL);
} else if (conn->request_len > 0 &&
strcmp(conn->mg_conn.http_version, "1.0") != 0 &&
strcmp(conn->mg_conn.http_version, "1.1") != 0) {
send_http_error(conn, 505, NULL);
} else if (conn->request_len > 0 && conn->endpoint_type == EP_NONE) {
#ifndef MONGOOSE_NO_WEBSOCKET
send_websocket_handshake_if_requested(&conn->mg_conn);
#endif
send_continue_if_expected(conn);
open_local_endpoint(conn, 0);
}
#ifndef MONGOOSE_NO_CGI
if (conn->endpoint_type == EP_CGI && conn->endpoint.nc != NULL) {
ns_forward(conn->ns_conn, conn->endpoint.nc);
}
#endif
if (conn->endpoint_type == EP_USER) {
conn->mg_conn.content = io->buf;
conn->mg_conn.content_len = io->len;
n = call_user(conn, MG_RECV);
if (n < 0) {
conn->ns_conn->flags |= NSF_FINISHED_SENDING_DATA;
} else if ((size_t) n <= io->len) {
iobuf_remove(io, n);
}
call_request_handler_if_data_is_buffered(conn);
}
#ifndef MONGOOSE_NO_DAV
if (conn->endpoint_type == EP_PUT && io->len > 0) {
forward_put_data(conn);
}
#endif
}
static void call_http_client_handler(struct connection *conn) {
//conn->mg_conn.status_code = code;
// For responses without Content-Length, use the whole buffer
if (conn->cl == 0) {
conn->mg_conn.content_len = conn->ns_conn->recv_iobuf.len;
}
conn->mg_conn.content = conn->ns_conn->recv_iobuf.buf;
if (call_user(conn, MG_REPLY) == MG_FALSE) {
conn->ns_conn->flags |= NSF_CLOSE_IMMEDIATELY;
}
iobuf_remove(&conn->ns_conn->recv_iobuf, conn->mg_conn.content_len);
conn->mg_conn.status_code = 0;
conn->cl = conn->num_bytes_recv = conn->request_len = 0;
NS_FREE(conn->request);
conn->request = NULL;
}
static void process_response(struct connection *conn) {
struct iobuf *io = &conn->ns_conn->recv_iobuf;
try_parse(conn);
DBG(("%p %d %lu", conn, conn->request_len, (unsigned long)io->len));
if (conn->request_len < 0 ||
(conn->request_len == 0 && io->len > MAX_REQUEST_SIZE)) {
call_http_client_handler(conn);
} else if ((int64_t) io->len >= conn->cl) {
call_http_client_handler(conn);
}
}
struct mg_connection *mg_connect(struct mg_server *server, const char *addr) {
struct ns_connection *nsconn;
struct connection *conn;
nsconn = ns_connect(&server->ns_mgr, addr, mg_ev_handler, NULL);
if (nsconn == NULL) return 0;
if ((conn = (struct connection *) NS_CALLOC(1, sizeof(*conn))) == NULL) {
nsconn->flags |= NSF_CLOSE_IMMEDIATELY;
return 0;
}
// Interlink two structs
conn->ns_conn = nsconn;
nsconn->user_data = conn;
conn->server = server;
conn->endpoint_type = EP_CLIENT;
//conn->handler = handler;
conn->mg_conn.server_param = server->ns_mgr.user_data;
conn->ns_conn->flags = NSF_CONNECTING;
return &conn->mg_conn;
}
#ifndef MONGOOSE_NO_LOGGING
static void log_header(const struct mg_connection *conn, const char *header,
FILE *fp) {
const char *header_value;
if ((header_value = mg_get_header(conn, header)) == NULL) {
(void) fprintf(fp, "%s", " -");
} else {
(void) fprintf(fp, " \"%s\"", header_value);
}
}
static void log_access(const struct connection *conn, const char *path) {
const struct mg_connection *c = &conn->mg_conn;
FILE *fp = (path == NULL) ? NULL : fopen(path, "a+");
char date[64], user[100];
time_t now;
if (fp == NULL) return;
now = time(NULL);
strftime(date, sizeof(date), "%d/%b/%Y:%H:%M:%S %z", localtime(&now));
flockfile(fp);
mg_parse_header(mg_get_header(&conn->mg_conn, "Authorization"), "username",
user, sizeof(user));
fprintf(fp, "%s - %s [%s] \"%s %s%s%s HTTP/%s\" %d 0",
c->remote_ip, user[0] == '\0' ? "-" : user, date,
c->request_method ? c->request_method : "-",
c->uri ? c->uri : "-", c->query_string ? "?" : "",
c->query_string ? c->query_string : "",
c->http_version, c->status_code);
log_header(c, "Referer", fp);
log_header(c, "User-Agent", fp);
fputc('\n', fp);
fflush(fp);
funlockfile(fp);
fclose(fp);
}
#endif
static void close_local_endpoint(struct connection *conn) {
struct mg_connection *c = &conn->mg_conn;
// Must be done before free()
int keep_alive = should_keep_alive(&conn->mg_conn) &&
(conn->endpoint_type == EP_FILE || conn->endpoint_type == EP_USER);
DBG(("%p %d %d %d", conn, conn->endpoint_type, keep_alive,
conn->ns_conn->flags));
switch (conn->endpoint_type) {
case EP_PUT:
case EP_FILE:
close(conn->endpoint.fd);
break;
case EP_CGI:
case EP_PROXY:
if (conn->endpoint.nc != NULL) {
DBG(("%p %p %p :-)", conn, conn->ns_conn, conn->endpoint.nc));
conn->endpoint.nc->flags |= NSF_CLOSE_IMMEDIATELY;
conn->endpoint.nc->user_data = NULL;
}
break;
default: break;
}
#ifndef MONGOOSE_NO_LOGGING
if (c->status_code > 0 && conn->endpoint_type != EP_CLIENT &&
c->status_code != 400) {
log_access(conn, conn->server->config_options[ACCESS_LOG_FILE]);
}
#endif
// Gobble possible POST data sent to the URI handler
iobuf_free(&conn->ns_conn->recv_iobuf);
NS_FREE(conn->request);
NS_FREE(conn->path_info);
conn->endpoint.nc = NULL;
conn->request = conn->path_info = NULL;
conn->endpoint_type = EP_NONE;
conn->cl = conn->num_bytes_recv = conn->request_len = 0;
conn->ns_conn->flags &= ~(NSF_FINISHED_SENDING_DATA |
NSF_BUFFER_BUT_DONT_SEND | NSF_CLOSE_IMMEDIATELY |
MG_HEADERS_SENT | MG_LONG_RUNNING);
// Do not memset() the whole structure, as some of the fields
// (IP addresses & ports, server_param) must survive. Nullify the rest.
c->request_method = c->uri = c->http_version = c->query_string = NULL;
c->num_headers = c->status_code = c->is_websocket = c->content_len = 0;
c->connection_param = c->callback_param = NULL;
if (keep_alive) {
on_recv_data(conn); // Can call us recursively if pipelining is used
} else {
conn->ns_conn->flags |= conn->ns_conn->send_iobuf.len == 0 ?
NSF_CLOSE_IMMEDIATELY : NSF_FINISHED_SENDING_DATA;
}
}
static void transfer_file_data(struct connection *conn) {
char buf[IOBUF_SIZE];
int n;
// If output buffer is too big, don't send anything. Wait until
// mongoose drains already buffered data to the client.
if (conn->ns_conn->send_iobuf.len > sizeof(buf) * 2) return;
// Do not send anything past Content-Length: read at most cl bytes at a time
n = read(conn->endpoint.fd, buf, conn->cl < (int64_t) sizeof(buf) ?
(int) conn->cl : (int) sizeof(buf));
if (n <= 0) {
close_local_endpoint(conn);
} else if (n > 0) {
conn->cl -= n;
ns_send(conn->ns_conn, buf, n);
if (conn->cl <= 0) {
close_local_endpoint(conn);
}
}
}
int mg_poll_server(struct mg_server *server, int milliseconds) {
return ns_mgr_poll(&server->ns_mgr, milliseconds);
}
void mg_destroy_server(struct mg_server **server) {
if (server != NULL && *server != NULL) {
struct mg_server *s = *server;
int i;
ns_mgr_free(&s->ns_mgr);
for (i = 0; i < (int) ARRAY_SIZE(s->config_options); i++) {
NS_FREE(s->config_options[i]); // It is OK to free(NULL)
}
NS_FREE(s);
*server = NULL;
}
}
struct mg_connection *mg_next(struct mg_server *s, struct mg_connection *c) {
struct ns_connection *nc = ns_next(&s->ns_mgr, c == NULL ? NULL :
MG_CONN_2_CONN(c)->ns_conn);
if (nc != NULL && nc->user_data != NULL) {
return & ((struct connection *) nc->user_data)->mg_conn;
} else {
return NULL;
}
}
static int get_var(const char *data, size_t data_len, const char *name,
char *dst, size_t dst_len) {
const char *p, *e, *s;
size_t name_len;
int len;
if (dst == NULL || dst_len == 0) {
len = -2;
} else if (data == NULL || name == NULL || data_len == 0) {
len = -1;
dst[0] = '\0';
} else {
name_len = strlen(name);
e = data + data_len;
len = -1;
dst[0] = '\0';
// data is "var1=val1&var2=val2...". Find variable first
for (p = data; p + name_len < e; p++) {
if ((p == data || p[-1] == '&') && p[name_len] == '=' &&
!mg_strncasecmp(name, p, name_len)) {
// Point p to variable value
p += name_len + 1;
// Point s to the end of the value
s = (const char *) memchr(p, '&', (size_t)(e - p));
if (s == NULL) {
s = e;
}
assert(s >= p);
// Decode variable into destination buffer
len = mg_url_decode(p, (size_t)(s - p), dst, dst_len, 1);
// Redirect error code from -1 to -2 (destination buffer too small).
if (len == -1) {
len = -2;
}
break;
}
}
}
return len;
}
int mg_get_var(const struct mg_connection *conn, const char *name,
char *dst, size_t dst_len) {
int len = get_var(conn->query_string, conn->query_string == NULL ? 0 :
strlen(conn->query_string), name, dst, dst_len);
if (len < 0) {
len = get_var(conn->content, conn->content_len, name, dst, dst_len);
}
return len;
}
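// Example (sketch): for a query string "a=1&b=hello%20world",
// mg_get_var(conn, "b", buf, sizeof(buf)) stores "hello world" and returns 11;
// the request body is consulted only when the variable is absent from the
// query string.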
static int get_line_len(const char *buf, int buf_len) {
int len = 0;
while (len < buf_len && buf[len] != '\n') len++;
return buf[len] == '\n' ? len + 1: -1;
}
int mg_parse_multipart(const char *buf, int buf_len,
char *var_name, int var_name_len,
char *file_name, int file_name_len,
const char **data, int *data_len) {
static const char cd[] = "Content-Disposition: ";
//struct mg_connection c;
int hl, bl, n, ll, pos, cdl = sizeof(cd) - 1;
//char *p;
if (buf == NULL || buf_len <= 0) return 0;
if ((hl = get_request_len(buf, buf_len)) <= 0) return 0;
if (buf[0] != '-' || buf[1] != '-' || buf[2] == '\n') return 0;
// Get boundary length
bl = get_line_len(buf, buf_len);
// Loop through headers, fetch variable name and file name
var_name[0] = file_name[0] = '\0';
for (n = bl; (ll = get_line_len(buf + n, hl - n)) > 0; n += ll) {
if (mg_strncasecmp(cd, buf + n, cdl) == 0) {
parse_header(buf + n + cdl, ll - (cdl + 2), "name",
var_name, var_name_len);
parse_header(buf + n + cdl, ll - (cdl + 2), "filename",
file_name, file_name_len);
}
}
// Scan body, search for terminating boundary
for (pos = hl; pos + (bl - 2) < buf_len; pos++) {
if (buf[pos] == '-' && !memcmp(buf, &buf[pos], bl - 2)) {
if (data_len != NULL) *data_len = (pos - 2) - hl;
if (data != NULL) *data = buf + hl;
return pos;
}
}
return 0;
}
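// Usage sketch (illustrative): all parts of a multipart body can be walked by
// advancing the buffer with the returned offset until 0 is returned. Here
// body, body_len, var, fname, data and data_len are placeholder variables:
//   int ofs, n = 0;
//   while ((ofs = mg_parse_multipart(body + n, body_len - n,
//                                    var, sizeof(var), fname, sizeof(fname),
//                                    &data, &data_len)) > 0) {
//     n += ofs;  // var/fname/data describe the current part
//   }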
const char **mg_get_valid_option_names(void) {
return static_config_options;
}
void mg_copy_listeners(struct mg_server *s, struct mg_server *to) {
struct ns_connection *c;
for (c = ns_next(&s->ns_mgr, NULL); c != NULL; c = ns_next(&s->ns_mgr, c)) {
struct ns_connection *tmp;
if ((c->flags & NSF_LISTENING) &&
(tmp = (struct ns_connection *) NS_MALLOC(sizeof(*tmp))) != NULL) {
memcpy(tmp, c, sizeof(*tmp));
tmp->mgr = &to->ns_mgr;
ns_add_conn(tmp->mgr, tmp);
}
}
}
static int get_option_index(const char *name) {
int i;
for (i = 0; static_config_options[i * 2] != NULL; i++) {
if (strcmp(static_config_options[i * 2], name) == 0) {
return i;
}
}
return -1;
}
static void set_default_option_values(char **opts) {
const char *value, **all_opts = mg_get_valid_option_names();
int i;
for (i = 0; all_opts[i * 2] != NULL; i++) {
value = all_opts[i * 2 + 1];
if (opts[i] == NULL && value != NULL) {
opts[i] = mg_strdup(value);
}
}
}
const char *mg_set_option(struct mg_server *server, const char *name,
const char *value) {
int ind = get_option_index(name);
const char *error_msg = NULL;
char **v = NULL;
if (ind < 0) return "No such option";
v = &server->config_options[ind];
// Return success immediately if setting to the same value
if ((*v == NULL && value == NULL) ||
(value != NULL && *v != NULL && !strcmp(value, *v))) {
return NULL;
}
if (*v != NULL) {
NS_FREE(*v);
*v = NULL;
}
if (value == NULL || value[0] == '\0') return NULL;
*v = mg_strdup(value);
DBG(("%s [%s]", name, *v));
if (ind == LISTENING_PORT) {
struct vec vec;
while ((value = next_option(value, &vec, NULL)) != NULL) {
struct ns_connection *c = ns_bind(&server->ns_mgr, vec.ptr,
mg_ev_handler, NULL);
if (c == NULL) {
error_msg = "Cannot bind to port";
break;
} else {
char buf[100];
ns_sock_to_str(c->sock, buf, sizeof(buf), 2);
NS_FREE(*v);
*v = mg_strdup(buf);
}
}
#ifndef MONGOOSE_NO_FILESYSTEM
} else if (ind == HEXDUMP_FILE) {
server->ns_mgr.hexdump_file = *v;
#endif
#if !defined(_WIN32) && !defined(MONGOOSE_NO_USER)
} else if (ind == RUN_AS_USER) {
struct passwd *pw;
if ((pw = getpwnam(value)) == NULL) {
error_msg = "Unknown user";
} else if (setgid(pw->pw_gid) != 0) {
error_msg = "setgid() failed";
} else if (setuid(pw->pw_uid) != 0) {
error_msg = "setuid() failed";
}
#endif
}
return error_msg;
}
static void set_ips(struct ns_connection *nc, int is_rem) {
struct connection *conn = (struct connection *) nc->user_data;
struct mg_connection *c = &conn->mg_conn;
char buf[100];
ns_sock_to_str(nc->sock, buf, sizeof(buf), is_rem ? 7 : 3);
sscanf(buf, "%47[^:]:%hu",
is_rem ? c->remote_ip : c->local_ip,
is_rem ? &c->remote_port : &c->local_port);
//DBG(("%p %s %s", conn, is_rem ? "rem" : "loc", buf));
}
static void on_accept(struct ns_connection *nc, union socket_address *sa) {
struct mg_server *server = (struct mg_server *) nc->mgr;
struct connection *conn;
if (!check_acl(server->config_options[ACCESS_CONTROL_LIST],
ntohl(* (uint32_t *) &sa->sin.sin_addr)) ||
(conn = (struct connection *) NS_CALLOC(1, sizeof(*conn))) == NULL) {
nc->flags |= NSF_CLOSE_IMMEDIATELY;
} else {
// Circularly link two connection structures
nc->user_data = conn;
conn->ns_conn = nc;
// Initialize the rest of connection attributes
conn->server = server;
conn->mg_conn.server_param = nc->mgr->user_data;
set_ips(nc, 1);
set_ips(nc, 0);
}
}
static void process_udp(struct ns_connection *nc) {
struct iobuf *io = &nc->recv_iobuf;
struct connection conn;
memset(&conn, 0, sizeof(conn));
conn.ns_conn = nc;
conn.server = (struct mg_server *) nc->mgr;
conn.request_len = parse_http_message(io->buf, io->len, &conn.mg_conn);
on_recv_data(&conn);
//ns_printf(nc, "%s", "HTTP/1.0 200 OK\r\n\r\n");
}
static void mg_ev_handler(struct ns_connection *nc, int ev, void *p) {
struct connection *conn = (struct connection *) nc->user_data;
// Send NS event to the handler. Note that call_user won't send an event
// if conn == NULL. Therefore, repeat this for NS_ACCEPT event as well.
#ifdef MONGOOSE_SEND_NS_EVENTS
{
struct connection *conn = (struct connection *) nc->user_data;
void *param[2] = { nc, p };
if (conn != NULL) conn->mg_conn.callback_param = param;
call_user(conn, (enum mg_event) ev);
}
#endif
switch (ev) {
case NS_ACCEPT:
on_accept(nc, (union socket_address *) p);
#ifdef MONGOOSE_SEND_NS_EVENTS
{
struct connection *conn = (struct connection *) nc->user_data;
void *param[2] = { nc, p };
if (conn != NULL) conn->mg_conn.callback_param = param;
call_user(conn, (enum mg_event) ev);
}
#endif
break;
case NS_CONNECT:
if (nc->user_data != NULL) {
set_ips(nc, 1);
set_ips(nc, 0);
}
conn->mg_conn.status_code = * (int *) p;
if (conn->mg_conn.status_code != 0 ||
(!(nc->flags & MG_PROXY_CONN) &&
call_user(conn, MG_CONNECT) == MG_FALSE)) {
nc->flags |= NSF_CLOSE_IMMEDIATELY;
}
break;
case NS_RECV:
if (conn != NULL) {
conn->num_bytes_recv += * (int *) p;
}
if (nc->flags & NSF_UDP) {
process_udp(nc);
} else if (nc->listener != NULL) {
on_recv_data(conn);
#ifndef MONGOOSE_NO_CGI
} else if (nc->flags & MG_CGI_CONN) {
on_cgi_data(nc);
#endif
} else if (nc->flags & MG_PROXY_CONN) {
if (conn != NULL) {
ns_forward(nc, conn->ns_conn);
}
} else {
process_response(conn);
}
break;
case NS_SEND:
break;
case NS_CLOSE:
nc->user_data = NULL;
if (nc->flags & (MG_CGI_CONN | MG_PROXY_CONN)) {
DBG(("%p %p closing cgi/proxy conn", conn, nc));
if (conn && conn->ns_conn) {
conn->ns_conn->flags &= ~NSF_BUFFER_BUT_DONT_SEND;
conn->ns_conn->flags |= conn->ns_conn->send_iobuf.len > 0 ?
NSF_FINISHED_SENDING_DATA : NSF_CLOSE_IMMEDIATELY;
conn->endpoint.nc = NULL;
}
} else if (conn != NULL) {
DBG(("%p %p %d closing", conn, nc, conn->endpoint_type));
if (conn->endpoint_type == EP_CLIENT && nc->recv_iobuf.len > 0) {
call_http_client_handler(conn);
}
call_user(conn, MG_CLOSE);
close_local_endpoint(conn);
conn->ns_conn = NULL;
NS_FREE(conn);
}
break;
case NS_POLL:
if (conn != NULL) {
if (call_user(conn, MG_POLL) == MG_TRUE) {
if (conn->ns_conn->flags & MG_HEADERS_SENT) {
write_terminating_chunk(conn);
}
close_local_endpoint(conn);
}
if (conn->endpoint_type == EP_FILE) {
transfer_file_data(conn);
}
}
// Expire idle connections
{
time_t current_time = * (time_t *) p;
if (conn != NULL && conn->mg_conn.is_websocket) {
ping_idle_websocket_connection(conn, current_time);
}
if (nc->listener != NULL &&
nc->last_io_time + MONGOOSE_IDLE_TIMEOUT_SECONDS < current_time) {
mg_ev_handler(nc, NS_CLOSE, NULL);
nc->flags |= NSF_CLOSE_IMMEDIATELY;
}
}
break;
default:
break;
}
}
static void iter2(struct ns_connection *nc, int ev, void *param) {
mg_handler_t func = NULL;
struct connection *conn = (struct connection *) nc->user_data;
const char *msg = (const char *) param;
int n;
(void) ev;
//DBG(("%p [%s]", conn, msg));
if (sscanf(msg, "%p %n", &func, &n) && func != NULL && conn != NULL) {
conn->mg_conn.callback_param = (void *) (msg + n);
func(&conn->mg_conn, MG_POLL);
}
}
void mg_wakeup_server_ex(struct mg_server *server, mg_handler_t cb,
const char *fmt, ...) {
va_list ap;
char buf[8 * 1024];
int len;
// Encode callback (cb) into a buffer
len = snprintf(buf, sizeof(buf), "%p ", cb);
va_start(ap, fmt);
len += vsnprintf(buf + len, sizeof(buf) - len, fmt, ap);
va_end(ap);
// "len + 1" is to include terminating \0 in the message
ns_broadcast(&server->ns_mgr, iter2, buf, len + 1);
}
void mg_wakeup_server(struct mg_server *server) {
ns_broadcast(&server->ns_mgr, NULL, (void *) "", 0);
}
const char *mg_get_option(const struct mg_server *server, const char *name) {
const char **opts = (const char **) server->config_options;
int i = get_option_index(name);
return i == -1 ? NULL : opts[i] == NULL ? "" : opts[i];
}
struct mg_server *mg_create_server(void *server_data, mg_handler_t handler) {
struct mg_server *server = (struct mg_server *) NS_CALLOC(1, sizeof(*server));
ns_mgr_init(&server->ns_mgr, server_data);
set_default_option_values(server->config_options);
server->event_handler = handler;
return server;
}
| 1 | 10,495 | This is also a question of style. src[i] will always be valid because of check if 'for' statement | BOINC-boinc | php |
@@ -398,7 +398,7 @@ func killKubeEdgeBinary(proc string) error {
//isKubeEdgeProcessRunning checks if the given process is running or not
func isKubeEdgeProcessRunning(proc string) (bool, error) {
- procRunning := fmt.Sprintf("pidof %s 2&>1", proc)
+ procRunning := fmt.Sprintf("pidof %s 2>&1", proc)
cmd := NewCommand(procRunning)
err := cmd.Exec() | 1 | /*
Copyright 2019 The KubeEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/blang/semver"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
types "github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/common"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha1"
)
// Constants used by installers
const (
KubeEdgeDownloadURL = "https://github.com/kubeedge/kubeedge/releases/download"
EdgeServiceFile = "edgecore.service"
CloudServiceFile = "cloudcore.service"
ServiceFileURLFormat = "https://raw.githubusercontent.com/kubeedge/kubeedge/release-%s/build/tools/%s"
KubeEdgePath = "/etc/kubeedge/"
KubeEdgeUsrBinPath = "/usr/local/bin"
KubeEdgeBinaryName = "edgecore"
KubeCloudBinaryName = "cloudcore"
KubeEdgeConfigDir = KubeEdgePath + "config/"
KubeEdgeCloudCoreNewYaml = KubeEdgeConfigDir + "cloudcore.yaml"
KubeEdgeEdgeCoreNewYaml = KubeEdgeConfigDir + "edgecore.yaml"
KubeEdgeLogPath = "/var/log/kubeedge/"
KubeEdgeCrdPath = KubeEdgePath + "crds"
KubeEdgeCRDDownloadURL = "https://raw.githubusercontent.com/kubeedge/kubeedge/master/build/crds"
latestReleaseVersionURL = "https://kubeedge.io/latestversion"
RetryTimes = 5
APT string = "apt"
YUM string = "yum"
)
//AddToolVals gets the value and default value of each flag and collects them in a temporary cache
func AddToolVals(f *pflag.Flag, flagData map[string]types.FlagData) {
flagData[f.Name] = types.FlagData{Val: f.Value.String(), DefVal: f.DefValue}
}
//CheckIfAvailable returns the default value if the value of the flag is empty
func CheckIfAvailable(val, defval string) string {
if val == "" {
return defval
}
return val
}
//Common struct contains OS and Tool version properties and also embeds the OS interface
type Common struct {
types.OSTypeInstaller
OSVersion string
ToolVersion semver.Version
KubeConfig string
Master string
}
//SetOSInterface defines a method to set the implementation of the OS interface
func (co *Common) SetOSInterface(intf types.OSTypeInstaller) {
co.OSTypeInstaller = intf
}
//GetPackageManager gets the package manager of the OS
func GetPackageManager() string {
cmd := NewCommand("command -v apt || command -v yum")
err := cmd.Exec()
if err != nil {
fmt.Println(err)
return ""
}
if strings.HasSuffix(cmd.GetStdOut(), APT) {
return APT
} else if strings.HasSuffix(cmd.GetStdOut(), YUM) {
return YUM
} else {
return ""
}
}
//GetOSInterface returns an OS-specific object which implements the OSTypeInstaller interface.
func GetOSInterface() types.OSTypeInstaller {
switch GetPackageManager() {
case APT:
return &DebOS{}
case YUM:
return &RpmOS{}
default:
fmt.Println("Failed to detect supported package manager command(apt, yum), exit")
panic("Failed to detect supported package manager command(apt, yum), exit")
}
}
// IsCloudCore identifies whether cloudcore is already running on the node.
// If so, it returns true; otherwise the node can be used as an edge node and initialised.
func IsCloudCore() (types.ModuleRunning, error) {
osType := GetOSInterface()
cloudCoreRunning, err := osType.IsKubeEdgeProcessRunning(KubeCloudBinaryName)
if err != nil {
return types.NoneRunning, err
}
if cloudCoreRunning {
return types.KubeEdgeCloudRunning, nil
}
// check the process, and then check the service
edgeCoreRunning, err := osType.IsKubeEdgeProcessRunning(KubeEdgeBinaryName)
if err != nil {
return types.NoneRunning, err
}
if edgeCoreRunning {
return types.KubeEdgeEdgeRunning, nil
}
edgeCoreRunning, err = isEdgeCoreServiceRunning("edge")
if err != nil {
return types.NoneRunning, err
}
if edgeCoreRunning {
return types.KubeEdgeEdgeRunning, nil
}
edgeCoreRunning, err = isEdgeCoreServiceRunning("edgecore")
if err != nil {
return types.NoneRunning, err
}
if edgeCoreRunning {
return types.KubeEdgeEdgeRunning, nil
}
return types.NoneRunning, nil
}
// GetLatestVersion returns the latest non-prerelease, non-draft version of kubeedge in releases
func GetLatestVersion() (string, error) {
	//Fetch the latest release version from the KubeEdge release page
versionURL := "curl -k " + latestReleaseVersionURL
cmd := exec.Command("sh", "-c", versionURL)
latestReleaseData, err := cmd.Output()
if err != nil {
return "", err
}
return string(latestReleaseData), nil
}
// BuildConfig builds a rest Config from the given kubeconfig and master flags
func BuildConfig(kubeConfig, master string) (conf *rest.Config, err error) {
config, err := clientcmd.BuildConfigFromFlags(master, kubeConfig)
if err != nil {
return nil, err
}
return config, nil
}
// isK8SComponentInstalled checks whether a supported K8s version is already installed on the host
func isK8SComponentInstalled(kubeConfig, master string) error {
config, err := BuildConfig(kubeConfig, master)
if err != nil {
return fmt.Errorf("Failed to build config, err: %v", err)
}
discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
return fmt.Errorf("Failed to init discovery client, err: %v", err)
}
serverVersion, err := discoveryClient.ServerVersion()
if err != nil {
return fmt.Errorf("Failed to get the version of K8s master, please check whether K8s was successfully installed, err: %v", err)
}
return checkKubernetesVersion(serverVersion)
}
func checkKubernetesVersion(serverVersion *version.Info) error {
reg := regexp.MustCompile(`[[:digit:]]*`)
minorVersion := reg.FindString(serverVersion.Minor)
k8sMinorVersion, err := strconv.Atoi(minorVersion)
if err != nil {
return fmt.Errorf("Could not parse the minor version of K8s, error: %s", err)
}
if k8sMinorVersion >= types.DefaultK8SMinimumVersion {
return nil
}
return fmt.Errorf("Your minor version of K8s is lower than %d, please reinstall newer version", types.DefaultK8SMinimumVersion)
}
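// Illustrative sketch (not part of the original file; the helper name is
// hypothetical): the regexp above keeps only the leading digits of the
// reported minor version, which is what lets values like "21+" from managed
// clusters parse cleanly.
func exampleParseMinorVersion() {
	reg := regexp.MustCompile(`[[:digit:]]*`)
	for _, minor := range []string{"21", "21+"} {
		n, err := strconv.Atoi(reg.FindString(minor))
		fmt.Printf("minor %q parses to %d (err: %v)\n", minor, n, err)
	}
}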
//installKubeEdge downloads the provided version of KubeEdge,
//untars it in the specified location /etc/kubeedge/ and then copies
//the binary to the executables' path (eg: /usr/local/bin)
func installKubeEdge(options types.InstallOptions, arch string, version semver.Version) error {
// create the storage path of the kubeedge installation packages
if options.TarballPath == "" {
options.TarballPath = KubeEdgePath
} else {
err := os.MkdirAll(options.TarballPath, os.ModePerm)
if err != nil {
return fmt.Errorf("not able to create %s folder path", options.TarballPath)
}
}
err := os.MkdirAll(KubeEdgePath, os.ModePerm)
if err != nil {
return fmt.Errorf("not able to create %s folder path", KubeEdgePath)
}
if arch == "armhf" {
arch = "arm"
}
	//Check if the same version already exists; if so, skip the download and just verify its checksum.
	//If the checksum fails, the user is given the option to continue untarring or to quit.
	//A checksum file is published at the download URL, so the two values can be compared
	//to confirm that the download completed properly before proceeding further.
dirname := fmt.Sprintf("kubeedge-v%s-linux-%s", version, arch)
filename := fmt.Sprintf("kubeedge-v%s-linux-%s.tar.gz", version, arch)
checksumFilename := fmt.Sprintf("checksum_kubeedge-v%s-linux-%s.tar.gz.txt", version, arch)
filePath := fmt.Sprintf("%s/%s", options.TarballPath, filename)
if _, err = os.Stat(filePath); err == nil {
fmt.Printf("Expected or Default KubeEdge version %v is already downloaded and will checksum for it. \n", version)
if success, _ := checkSum(filename, checksumFilename, version, options.TarballPath); !success {
fmt.Printf("%v in your path checksum failed and do you want to delete this file and try to download again? \n", filename)
for {
confirm, err := askForconfirm()
if err != nil {
fmt.Println(err.Error())
continue
}
if confirm {
cmdStr := fmt.Sprintf("cd %s && rm -f %s", options.TarballPath, filename)
if err := NewCommand(cmdStr).Exec(); err != nil {
return err
}
klog.Infof("%v have been deleted and will try to download again", filename)
if err := retryDownload(filename, checksumFilename, version, options.TarballPath); err != nil {
return err
}
} else {
klog.Warningf("failed to checksum and will continue to install.")
}
break
}
} else {
fmt.Println("Expected or Default KubeEdge version", version, "is already downloaded")
}
} else if !os.IsNotExist(err) {
return err
} else {
if err := retryDownload(filename, checksumFilename, version, options.TarballPath); err != nil {
return err
}
return nil
}
if err := downloadServiceFile(options.ComponentType, version, KubeEdgePath); err != nil {
return fmt.Errorf("fail to download service file,error:{%s}", err.Error())
}
var untarFileAndMoveCloudCore, untarFileAndMoveEdgeCore string
if options.ComponentType == types.CloudCore {
untarFileAndMoveCloudCore = fmt.Sprintf("cd %s && tar -C %s -xvzf %s && cp %s/%s/cloud/cloudcore/%s %s/",
options.TarballPath, options.TarballPath, filename, options.TarballPath, dirname, KubeCloudBinaryName, KubeEdgeUsrBinPath)
cmd := NewCommand(untarFileAndMoveCloudCore)
if err := cmd.Exec(); err != nil {
return err
}
fmt.Println(cmd.GetStdOut())
} else if options.ComponentType == types.EdgeCore {
untarFileAndMoveEdgeCore = fmt.Sprintf("cd %s && tar -C %s -xvzf %s && cp %s/%s/edge/%s %s/",
options.TarballPath, options.TarballPath, filename, options.TarballPath, dirname, KubeEdgeBinaryName, KubeEdgePath)
cmd := NewCommand(untarFileAndMoveEdgeCore)
if err := cmd.Exec(); err != nil {
return err
}
fmt.Println(cmd.GetStdOut())
}
return nil
}
//runEdgeCore sets the environment variable GOARCHAIUS_CONFIG_PATH to the configuration path
//and then starts edgecore with its logs being captured
func runEdgeCore(version semver.Version) error {
// create the log dir for kubeedge
err := os.MkdirAll(KubeEdgeLogPath, os.ModePerm)
if err != nil {
return fmt.Errorf("not able to create %s folder path", KubeEdgeLogPath)
}
var binExec string
systemdExist := hasSystemd()
edgecoreServiceName := "edgecore"
if systemdExist {
binExec = fmt.Sprintf("sudo ln /etc/kubeedge/%s.service /etc/systemd/system/%s.service && sudo systemctl daemon-reload && sudo systemctl enable %s && sudo systemctl start %s", edgecoreServiceName, edgecoreServiceName, edgecoreServiceName, edgecoreServiceName)
} else {
binExec = fmt.Sprintf("%s > %skubeedge/edge/%s.log 2>&1 &", KubeEdgeBinaryName, KubeEdgePath, KubeEdgeBinaryName)
}
cmd := NewCommand(binExec)
cmd.Cmd.Env = os.Environ()
env := fmt.Sprintf("GOARCHAIUS_CONFIG_PATH=%skubeedge/edge", KubeEdgePath)
cmd.Cmd.Env = append(cmd.Cmd.Env, env)
if err := cmd.Exec(); err != nil {
return err
}
fmt.Println(cmd.GetStdOut())
if systemdExist {
fmt.Printf("KubeEdge edgecore is running, For logs visit: journalctl -u %s.service -b\n", edgecoreServiceName)
} else {
fmt.Println("KubeEdge edgecore is running, For logs visit: ", KubeEdgeLogPath+KubeEdgeBinaryName+".log")
}
return nil
}
// killKubeEdgeBinary will search for the KubeEdge process and forcefully kill it
func killKubeEdgeBinary(proc string) error {
var binExec string
if proc == "cloudcore" {
binExec = fmt.Sprintf("pkill %s", proc)
} else {
systemdExist := hasSystemd()
var serviceName string
if running, err := isEdgeCoreServiceRunning("edge"); err == nil && running {
serviceName = "edge"
}
if running, err := isEdgeCoreServiceRunning("edgecore"); err == nil && running {
serviceName = "edgecore"
}
if systemdExist {
// remove the system service.
binExec = fmt.Sprintf("sudo systemctl stop %s.service && sudo rm /etc/systemd/system/%s.service && sudo systemctl daemon-reload && systemctl reset-failed", serviceName, serviceName)
} else {
binExec = fmt.Sprintf("pkill %s", proc)
}
}
cmd := NewCommand(binExec)
if err := cmd.Exec(); err != nil {
return err
}
fmt.Println("KubeEdge", proc, "is stopped, For logs visit: ", KubeEdgeLogPath+proc+".log")
return nil
}
//isKubeEdgeProcessRunning checks if the given process is running or not
func isKubeEdgeProcessRunning(proc string) (bool, error) {
procRunning := fmt.Sprintf("pidof %s 2&>1", proc)
cmd := NewCommand(procRunning)
err := cmd.Exec()
if cmd.ExitCode == 0 {
return true, nil
} else if cmd.ExitCode == 1 {
return false, nil
}
return false, err
}
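// For illustration only (a hypothetical helper, not original code): pidof
// exits 0 when at least one matching process exists and 1 when none is
// found, which is why the exit code is interpreted as running/not-running
// above. A direct os/exec version of the same check could look like this:
func exampleIsProcessRunning(proc string) (bool, error) {
	err := exec.Command("pidof", proc).Run()
	if err == nil {
		return true, nil
	}
	if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 {
		return false, nil
	}
	return false, err
}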
func isEdgeCoreServiceRunning(serviceName string) (bool, error) {
serviceRunning := fmt.Sprintf("systemctl list-unit-files | grep enabled | grep %s ", serviceName)
if err := NewCommand(serviceRunning).Exec(); err != nil {
return false, err
}
return true, nil
}
// check if systemd exists
func hasSystemd() bool {
cmd := "file /sbin/init"
if err := NewCommand(cmd).Exec(); err != nil {
return false
}
return true
}
func checkSum(filename, checksumFilename string, version semver.Version, tarballPath string) (bool, error) {
//Verify the tar with checksum
fmt.Printf("%s checksum: \n", filename)
getActualCheckSum := NewCommand(fmt.Sprintf("cd %s && sha512sum %s | awk '{split($0,a,\"[ ]\"); print a[1]}'", tarballPath, filename))
if err := getActualCheckSum.Exec(); err != nil {
return false, err
}
fmt.Printf("%s content: \n", checksumFilename)
getDesiredCheckSum := NewCommand(fmt.Sprintf("wget -qO- %s/v%s/%s", KubeEdgeDownloadURL, version, checksumFilename))
if err := getDesiredCheckSum.Exec(); err != nil {
return false, err
}
if getDesiredCheckSum.GetStdOut() != getActualCheckSum.GetStdOut() {
fmt.Printf("Failed to verify the checksum of %s ... \n\n", filename)
return false, nil
}
return true, nil
}
func retryDownload(filename, checksumFilename string, version semver.Version, tarballPath string) error {
try := 0
for ; try < downloadRetryTimes; try++ {
//Download the tar from repo
dwnldURL := fmt.Sprintf("cd %s && wget -k --no-check-certificate --progress=bar:force %s/v%s/%s",
tarballPath, KubeEdgeDownloadURL, version, filename)
if err := NewCommand(dwnldURL).Exec(); err != nil {
return err
}
//Verify the tar with checksum
fmt.Printf("%s checksum: \n", filename)
getActualCheckSum := NewCommand(fmt.Sprintf("cd %s && sha512sum %s | awk '{split($0,a,\"[ ]\"); print a[1]}'", tarballPath, filename))
if err := getActualCheckSum.Exec(); err != nil {
return err
}
fmt.Printf("%s content: \n", checksumFilename)
getDesiredCheckSum := NewCommand(fmt.Sprintf("wget -qO- %s/v%s/%s", KubeEdgeDownloadURL, version, checksumFilename))
if err := getDesiredCheckSum.Exec(); err != nil {
return err
}
if getActualCheckSum.GetStdOut() == getDesiredCheckSum.GetStdOut() {
break
		} else {
			fmt.Printf("Failed to verify the checksum of %s, try to download it again ... \n\n", filename)
			//Cleanup the downloaded file so the next iteration of the loop can retry the download
			if err := NewCommand(fmt.Sprintf("cd %s && rm -f %s", tarballPath, filename)).Exec(); err != nil {
				return err
			}
		}
}
if try == downloadRetryTimes {
return fmt.Errorf("failed to download %s", filename)
}
return nil
}
// Compress compresses the given folders or files into a tarball
func Compress(tarName string, paths []string) (err error) {
tarFile, err := os.Create(tarName)
if err != nil {
return err
}
defer func() {
err = tarFile.Close()
}()
absTar, err := filepath.Abs(tarName)
if err != nil {
return err
}
// enable compression if file ends in .gz
tw := tar.NewWriter(tarFile)
if strings.HasSuffix(tarName, ".gz") || strings.HasSuffix(tarName, ".gzip") {
gz := gzip.NewWriter(tarFile)
defer gz.Close()
tw = tar.NewWriter(gz)
}
defer tw.Close()
	// walk each specified path and add encountered files to the tar
for _, path := range paths {
// validate path
path = filepath.Clean(path)
absPath, err := filepath.Abs(path)
if err != nil {
fmt.Println(err)
continue
}
if absPath == absTar {
fmt.Printf("tar file %s cannot be the source\n", tarName)
continue
}
if absPath == filepath.Dir(absTar) {
fmt.Printf("tar file %s cannot be in source %s\n", tarName, absPath)
continue
}
walker := func(file string, finfo os.FileInfo, err error) error {
if err != nil {
return err
}
// fill in header info using func FileInfoHeader
hdr, err := tar.FileInfoHeader(finfo, finfo.Name())
if err != nil {
return err
}
relFilePath := file
if filepath.IsAbs(path) {
relFilePath, err = filepath.Rel(path, file)
if err != nil {
return err
}
}
// ensure header has relative file path
hdr.Name = relFilePath
if err := tw.WriteHeader(hdr); err != nil {
return err
}
			// if path is a dir, don't continue
if finfo.Mode().IsDir() {
return nil
}
// add file to tar
srcFile, err := os.Open(file)
if err != nil {
return err
}
defer srcFile.Close()
_, err = io.Copy(tw, srcFile)
if err != nil {
return err
}
return nil
}
// build tar
if err := filepath.Walk(path, walker); err != nil {
fmt.Printf("failed to add %s to tar: %s\n", path, err)
}
}
return nil
}
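// Example usage of Compress (illustrative only; the destination path is
// hypothetical): a name ending in ".gz" enables gzip compression of the tar.
func exampleCompress() {
	if err := Compress("/tmp/kubeedge-debug.tar.gz", []string{KubeEdgeLogPath, KubeEdgeConfigDir}); err != nil {
		fmt.Printf("failed to compress: %v\n", err)
	}
}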
func askForconfirm() (bool, error) {
var s string
fmt.Println("[y/N]: ")
if _, err := fmt.Scan(&s); err != nil {
return false, err
}
s = strings.ToLower(strings.TrimSpace(s))
if s == "y" {
return true, nil
} else if s == "n" {
return false, nil
} else {
return false, fmt.Errorf("Invalid Input")
}
}
// Execute shell script and filter
func ExecShellFilter(c string) (string, error) {
cmd := exec.Command("sh", "-c", c)
o, err := cmd.Output()
str := strings.Replace(string(o), " ", "", -1)
str = strings.Replace(str, "\n", "", -1)
if err != nil {
return str, fmt.Errorf("exec fail: %s, %s", c, err)
}
return str, nil
}
func FileExists(path string) bool {
_, err := os.Stat(path)
if err != nil {
return os.IsExist(err)
}
return true
}
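// Note (added commentary, not original code): when os.Stat fails for a
// reason other than non-existence (for example a permission error),
// os.IsExist(err) is false, so FileExists reports false even though the path
// may actually exist.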
func ParseEdgecoreConfig(edgecorePath string) (*v1alpha1.EdgeCoreConfig, error) {
edgeCoreConfig := v1alpha1.NewDefaultEdgeCoreConfig()
if err := edgeCoreConfig.Parse(edgecorePath); err != nil {
return nil, err
}
return edgeCoreConfig, nil
}
// IsContain determines if the given item is present in the slice
func IsContain(items []string, item string) bool {
for _, eachItem := range items {
if eachItem == item {
return true
}
}
return false
}
//print fail
func PrintFail(cmd string, s string) {
v := fmt.Sprintf("|%s %s failed|", s, cmd)
printResult(v)
}
//print success
func PrintSuccedd(cmd string, s string) {
v := fmt.Sprintf("|%s %s succeed|", s, cmd)
printResult(v)
}
func printResult(s string) {
line := "|"
if len(s) > 2 {
for i := 0; i < len(s)-2; i++ {
line = line + "-"
}
line = line + "|"
}
fmt.Println("")
fmt.Println(line)
fmt.Println(s)
fmt.Println(line)
}
func downloadServiceFile(componentType types.ComponentType, version semver.Version, storeDir string) error {
	// No need to download if
	// 1. systemd does not exist
	// 2. the service file already exists
if hasSystemd() {
var ServiceFileName string
switch componentType {
case types.CloudCore:
ServiceFileName = CloudServiceFile
case types.EdgeCore:
ServiceFileName = EdgeServiceFile
default:
return fmt.Errorf("component type %s not support", componentType)
}
ServiceFilePath := storeDir + "/" + ServiceFileName
strippedVersion := fmt.Sprintf("%d.%d", version.Major, version.Minor)
ServiceFileURL := fmt.Sprintf(ServiceFileURLFormat, strippedVersion, ServiceFileName)
if _, err := os.Stat(ServiceFilePath); err != nil {
if os.IsNotExist(err) {
cmdStr := fmt.Sprintf("cd %s && sudo -E wget -t %d -k --no-check-certificate %s", storeDir, RetryTimes, ServiceFileURL)
fmt.Printf("[Run as service] start to download service file for %s\n", componentType)
if err := NewCommand(cmdStr).Exec(); err != nil {
return err
}
fmt.Printf("[Run as service] success to download service file for %s\n", componentType)
} else {
return err
}
} else {
fmt.Printf("[Run as service] service file already exisits in %s, skip download\n", ServiceFilePath)
}
}
return nil
}
| 1 | 20,090 | Why change it? | kubeedge-kubeedge | go |
@@ -11,7 +11,7 @@ import {
textStyle,
colorStyle,
borders,
-} from '../src'
+} from '../src/index'
const theme = {
colors: { | 1 | import test from 'ava'
import {
space,
color,
width,
fontSize,
size,
gridGap,
gridRowGap,
gridColumnGap,
textStyle,
colorStyle,
borders,
} from '../src'
const theme = {
colors: {
blue: '#07c',
black: '#111',
},
}
test('returns color values from theme', t => {
const a = color({ theme, color: 'blue', bg: 'black' })
t.deepEqual(a, { color: '#07c', backgroundColor: '#111' })
})
test('returns raw color values', t => {
const a = color({
theme,
color: 'inherit',
bg: 'tomato',
})
t.deepEqual(a, { color: 'inherit', backgroundColor: 'tomato' })
})
test('backgroundColor prop overrides bg prop', t => {
const a = color({
backgroundColor: 'tomato',
bg: 'blue',
})
t.deepEqual(a, { backgroundColor: 'tomato' })
})
test('returns a pixel font-size', t => {
const a = fontSize({ fontSize: 48 })
t.deepEqual(a, { fontSize: '48px' })
})
test('uses a default font-size scale', t => {
const a = fontSize({ fontSize: 2 })
t.deepEqual(a, { fontSize: '16px' })
})
test('returns a string font-size', t => {
const a = fontSize({ fontSize: '2em' })
t.deepEqual(a, { fontSize: '2em' })
})
test('returns a percentage based width', t => {
const a = width({ width: 1 / 2 })
t.deepEqual(a, { width: '50%' })
})
test('returns a pixel based width', t => {
const a = width({ width: 256 })
t.deepEqual(a, { width: '256px' })
})
test('returns a string width', t => {
const a = width({ width: 'auto' })
t.deepEqual(a, { width: 'auto' })
})
test('returns an array of style objects', t => {
const styles = space({
m: '4px',
})
t.deepEqual(styles, { margin: '4px' })
})
test('returns 0 values', t => {
const styles = space({ m: 0 })
t.deepEqual(styles, { margin: 0 })
})
test('returns negative pixel values', t => {
const styles = space({ m: -2 })
t.deepEqual(styles, { margin: '-8px' })
})
test('returns negative em values', t => {
const styles = space({ m: '-16em' })
t.deepEqual(styles, { margin: '-16em' })
})
test('returns negative theme values', t => {
const styles = space({
theme: {
space: [0, 4, 8],
},
m: -2,
})
t.deepEqual(styles, { margin: '-8px' })
})
test('returns positive theme values', t => {
const styles = space({
theme: {
space: [0, '1em', '2em'],
},
m: 2,
})
t.deepEqual(styles, { margin: '2em' })
})
test('returns responsive values', t => {
const styles = space({
m: [0, 2, 3],
})
t.deepEqual(styles, {
margin: 0,
'@media screen and (min-width: 40em)': { margin: '8px' },
'@media screen and (min-width: 52em)': { margin: '16px' },
})
})
test('returns aliased values', t => {
const styles = space({
px: 2,
})
t.deepEqual(styles, { paddingLeft: '8px', paddingRight: '8px' })
})
test('returns string values from theme', t => {
const styles = space({
theme: {
space: [0, '1em'],
},
padding: 1,
})
t.deepEqual(styles, { padding: '1em' })
})
test('returns negative string values from theme', t => {
const styles = space({
theme: {
space: [0, '1em'],
},
margin: -1,
})
t.deepEqual(styles, { margin: '-1em' })
})
test('returns values from theme object', t => {
const styles = space({
theme: {
space: { sm: 1 },
},
margin: 'sm',
})
t.deepEqual(styles, { margin: '1px' })
})
test('pl prop sets paddingLeft', t => {
const styles = space({ pl: 2 })
t.deepEqual(styles, { paddingLeft: '8px' })
})
test('pl prop sets paddingLeft 0', t => {
const styles = space({ pl: 0 })
t.deepEqual(styles, { paddingLeft: 0 })
})
test('px prop overrides pl prop', t => {
const styles = space({
pl: 1,
px: 2,
})
t.deepEqual(styles, { paddingLeft: '8px', paddingRight: '8px' })
})
test('py prop overrides pb prop', t => {
const styles = space({
pb: 1,
py: 2,
})
t.deepEqual(styles, { paddingTop: '8px', paddingBottom: '8px' })
})
test('mx prop overrides mr prop', t => {
const styles = space({
mr: 1,
mx: 2,
})
t.deepEqual(styles, { marginLeft: '8px', marginRight: '8px' })
})
test('my prop overrides mt prop', t => {
const styles = space({
mt: 1,
my: 2,
})
t.deepEqual(styles, { marginTop: '8px', marginBottom: '8px' })
})
test('margin overrides m prop', t => {
const styles = space({
m: 1,
margin: 2,
})
t.deepEqual(styles, { margin: '8px' })
})
test('space includes propTypes', t => {
const { propTypes } = space
t.is(typeof propTypes, 'object')
t.is(typeof propTypes.m, 'function')
})
test('size returns width and height', t => {
const styles = size({
size: 4,
})
t.deepEqual(styles, { width: '4px', height: '4px' })
})
// grid
test('gridGap returns a scalar style', t => {
const a = gridGap({
theme: {
space: [0, 2, 4, 8],
},
gridGap: 3,
})
t.deepEqual(a, { gridGap: '8px' })
})
test('gridGap uses the default scale', t => {
const a = gridGap({
theme: {},
gridGap: 2,
})
t.deepEqual(a, { gridGap: '8px' })
})
test('gridRowGap returns a scalar style', t => {
const a = gridRowGap({
theme: {
space: [0, 2, 4, 8],
},
gridRowGap: 3,
})
t.deepEqual(a, { gridRowGap: '8px' })
})
test('gridRowGap uses the default scale', t => {
const a = gridRowGap({
theme: {},
gridRowGap: 2,
})
t.deepEqual(a, { gridRowGap: '8px' })
})
test('gridColumnGap returns a scalar style', t => {
const a = gridColumnGap({
theme: {
space: [0, 2, 4, 8],
},
gridColumnGap: 3,
})
t.deepEqual(a, { gridColumnGap: '8px' })
})
test('gridColumnGap uses the default scale', t => {
const a = gridColumnGap({
theme: {},
gridColumnGap: 2,
})
t.deepEqual(a, { gridColumnGap: '8px' })
})
test('textStyle prop returns theme.textStyles object', t => {
const a = textStyle({
theme: {
textStyles: {
heading: {
fontWeight: 'bold',
lineHeight: 1.25,
},
},
},
textStyle: 'heading',
})
t.deepEqual(a, {
fontWeight: 'bold',
lineHeight: 1.25,
})
})
test('colors prop returns theme.colorStyles object', t => {
const a = colorStyle({
theme: {
colorStyles: {
dark: {
color: '#fff',
backgroundColor: '#000',
},
},
},
colors: 'dark',
})
t.deepEqual(a, {
color: '#fff',
backgroundColor: '#000',
})
})
test('borders prop returns correct sequence', t => {
const a = borders({
borderBottom: '1px solid',
borderWidth: '2px',
borderStyle: 'dashed',
borderColor: 'red',
})
t.deepEqual(a, {
borderBottom: '1px solid',
borderWidth: '2px',
borderStyle: 'dashed',
borderColor: 'red',
})
})
| 1 | 5,047 | nb: this change is unnecessary | styled-system-styled-system | js |
@@ -120,6 +120,7 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
+ publicLink string // Public Link for the object
sha1 string // SHA-1 of the object content
}
| 1 | // Package box provides an interface to the Box
// object storage system.
package box
// FIXME Box only supports file names of 255 characters or less. Names
// that will not be supported are those that contain non-printable
// ascii, / or \, names with trailing spaces, and the special names
// “.” and “..”.
// FIXME box can copy a directory
import (
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "d0374ba6pgmaguie02ge15sv1mllndho"
rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
rootID = "0" // ID of root folder is always this
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Box App Client Id.\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload.",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
}
// Fs represents a remote box
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
	srv *rest.Client // the connection to the box server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
}
// Object describes a box object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
hasMetaData bool // whether info below has been set
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
sha1 string // SHA-1 of the object content
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("box root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses an box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
	authRetry := false
	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
		authRetry = true
		fs.Debugf(nil, "Should retry: %v", err)
	}
	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// substitute reserved characters for box
func replaceReservedChars(x string) string {
// Backslash for FULLWIDTH REVERSE SOLIDUS
return strings.Replace(x, "\\", "\", -1)
}
// restore reserved characters for box
func restoreReservedChars(x string) string {
// FULLWIDTH REVERSE SOLIDUS for Backslash
return strings.Replace(x, "\", "\\", -1)
}
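// Illustrative round trip (not part of the original file): a name containing
// a backslash is stored on Box using the full-width form and mapped back
// when listed.
func exampleReservedChars() {
	name := `dir\file`
	remote := replaceReservedChars(name)              // yields "dir\file"
	fmt.Println(restoreReservedChars(remote) == name) // prints true
}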
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
}
return false
})
if err != nil {
return nil, err
}
if !found {
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.Code == "" {
errResponse.Code = resp.Status
}
if errResponse.Status == 0 {
errResponse.Status = resp.StatusCode
}
return errResponse
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.UploadCutoff < minUploadCutoff {
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
}
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure Box: %v", err)
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath("")
return err
})
// Get rootID
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
err = f.dirCache.FindRoot(false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
newF := *f
newF.dirCache = dircache.New(newRoot, rootID, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return &newF, fs.ErrorIsFile
}
return f, nil
}
// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
if f.root == "" {
return f.root
}
return f.root + "/"
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
var err error
if info != nil {
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
}
return false
})
return pathIDOut, found, err
}
// fieldsValue creates a url.Values with fields set to those in api.Item
func fieldsValue() url.Values {
values := url.Values{}
values.Set("fields", api.ItemFields)
return values
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
var info *api.Item
opts := rest.Opts{
Method: "POST",
Path: "/folders",
Parameters: fieldsValue(),
}
mkdir := api.CreateFolder{
Name: replaceReservedChars(leaf),
Parent: api.Parent{
ID: pathID,
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
return "", err
}
// fmt.Printf("...Id %q\n", *info.Id)
return info.ID, nil
}
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
Parameters: fieldsValue(),
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
OUTER:
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder {
if filesOnly {
continue
}
} else if item.Type == api.ItemTypeFile {
if directoriesOnly {
continue
}
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
if item.ItemStatus != api.ItemStatusActive {
continue
}
item.Name = restoreReservedChars(item.Name)
if fn(item) {
found = true
break OUTER
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}
}
return
}
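// Note (added commentary, not original code): the listing above is
// paginated; each request returns at most listChunks entries and the loop
// advances "offset" by result.Limit until result.TotalCount entries have
// been seen.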
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil {
return nil, err
}
var iErr error
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
entries = append(entries, d)
} else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(remote, info)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
return false
})
if err != nil {
return nil, err
}
if iErr != nil {
return nil, iErr
}
return entries, nil
}
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, leaf, directoryID, nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
	switch err {
	case nil:
		return existingObj, existingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(in, src)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// PutUnchecked the object into the container
//
// This will produce an error if the object already exists
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
modTime := src.ModTime()
o, _, _, err := f.createObject(remote, modTime, size)
if err != nil {
return nil, err
}
return o, o.Update(in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(dir, true)
}
return err
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/" + id,
NoResponse: true,
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(dir string, check bool) error {
root := path.Join(f.root, dir)
if root == "" {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(false)
if err != nil {
return err
}
rootID, err := dc.FindDir(dir, false)
if err != nil {
return err
}
opts := rest.Opts{
Method: "DELETE",
Path: "/folders/" + rootID,
Parameters: url.Values{},
NoResponse: true,
}
opts.Parameters.Set("recursive", strconv.FormatBool(!check))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
}
f.dirCache.FlushDir(dir)
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(dir, true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err := srcObj.readMetaData()
if err != nil {
return nil, err
}
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
// Copy the object
opts := rest.Opts{
Method: "POST",
Path: "/files/" + srcObj.id + "/copy",
Parameters: fieldsValue(),
}
replacedLeaf := replaceReservedChars(leaf)
copyFile := api.CopyFile{
Name: replacedLeaf,
Parent: api.Parent{
ID: directoryID,
},
}
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, ©File, &info)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
err = dstObj.setMetaData(info)
if err != nil {
return nil, err
}
return dstObj, nil
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
return f.purgeCheck("", false)
}
// move a file or folder
func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
// Move the object
opts := rest.Opts{
Method: "PUT",
Path: endpoint + id,
Parameters: fieldsValue(),
}
move := api.UpdateFileMove{
Name: replaceReservedChars(leaf),
Parent: api.Parent{
ID: directoryID,
},
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return info, nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
// Do the move
info, err := f.move("/files/", srcObj.id, leaf, directoryID)
if err != nil {
return nil, err
}
err = dstObj.setMetaData(info)
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, directoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
// Do the move
_, err = f.move("/folders/", srcID, leaf, directoryID)
if err != nil {
return err
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
f.dirCache.ResetRoot()
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.SHA1)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// srvPath returns a path for use in server
func (o *Object) srvPath() string {
return replaceReservedChars(o.fs.rootSlash() + o.remote)
}
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.SHA1 {
return "", hash.ErrUnsupported
}
return o.sha1, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
}
return o.size
}
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != api.ItemTypeFile {
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
}
o.hasMetaData = true
o.size = int64(info.Size)
o.sha1 = info.SHA1
o.modTime = info.ModTime()
o.id = info.ID
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
return fs.ErrorObjectNotFound
}
}
return err
}
return o.setMetaData(info)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
}
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/files/" + o.id,
Parameters: fieldsValue(),
}
update := api.UpdateFileModTime{
ContentModifiedAt: api.Time(modTime),
}
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
return shouldRetry(resp, err)
})
return info, err
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
info, err := o.setModTime(modTime)
if err != nil {
return err
}
return o.setMetaData(info)
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.id == "" {
return nil, errors.New("can't download - no id")
}
fs.FixRangeOption(options, o.size)
var resp *http.Response
opts := rest.Opts{
Method: "GET",
Path: "/files/" + o.id + "/content",
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{
Name: replaceReservedChars(leaf),
ContentModifiedAt: api.Time(modTime),
ContentCreatedAt: api.Time(modTime),
Parent: api.Parent{
ID: directoryID,
},
}
var resp *http.Response
var result api.FolderItems
opts := rest.Opts{
Method: "POST",
Body: in,
MultipartMetadataName: "attributes",
MultipartContentName: "contents",
MultipartFileName: upload.Name,
RootURL: uploadURL,
}
// If object has an ID then it is existing so create a new version
if o.id != "" {
opts.Path = "/files/" + o.id + "/content"
} else {
opts.Path = "/files/content"
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return errors.Errorf("failed to upload %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
modTime := src.ModTime()
remote := o.Remote()
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
if err != nil {
return err
}
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
}
return err
}
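// Note (added commentary, not original code): uploads of at most
// opt.UploadCutoff bytes use the single-request upload above, while larger
// files go through the multipart upload path.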
// Remove an object
func (o *Object) Remove() error {
return o.fs.deleteObject(o.id)
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
| 1 | 7,456 | Is the publicLink mostly empty? I'm just wondering how much this will bloat memory usage as sometimes rclone has millions of Object~s in memory. | rclone-rclone | go |
@@ -65,6 +65,11 @@ type OrderSpec struct {
// validation process.
// This field must match the corresponding field on the DER encoded CSR.
DNSNames []string
+
+ // IPAddresses is a list of IP addresses that should be included as part of the Order
+ // validation process.
+ // This field must match the corresponding field on the DER encoded CSR.
+ IPAddresses []string `json:"ipAddresses,omitempty"`
}
type OrderStatus struct { | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package acme
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/internal/apis/meta"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Order is a type to represent an Order with an ACME server
type Order struct {
metav1.TypeMeta
metav1.ObjectMeta
Spec OrderSpec
Status OrderStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// OrderList is a list of Orders
type OrderList struct {
metav1.TypeMeta
metav1.ListMeta
Items []Order
}
type OrderSpec struct {
// Certificate signing request bytes in DER encoding.
// This will be used when finalizing the order.
// This field must be set on the order.
Request []byte
// IssuerRef references a properly configured ACME-type Issuer which should
// be used to create this Order.
// If the Issuer does not exist, processing will be retried.
// If the Issuer is not an 'ACME' Issuer, an error will be returned and the
// Order will be marked as failed.
IssuerRef cmmeta.ObjectReference
// CommonName is the common name as specified on the DER encoded CSR.
// If specified, this value must also be present in `dnsNames`.
// This field must match the corresponding field on the DER encoded CSR.
CommonName string
// DNSNames is a list of DNS names that should be included as part of the Order
// validation process.
// This field must match the corresponding field on the DER encoded CSR.
DNSNames []string
}
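// A minimal illustrative sketch (not part of the original file; all values
// are hypothetical and the CSR bytes are elided) of how an OrderSpec is
// populated:
var exampleOrderSpec = OrderSpec{
	Request:    nil, // DER encoded CSR bytes go here
	IssuerRef:  cmmeta.ObjectReference{Name: "letsencrypt-staging", Kind: "ClusterIssuer"},
	CommonName: "example.com",
	DNSNames:   []string{"example.com", "www.example.com"},
}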
type OrderStatus struct {
// URL of the Order.
// This will initially be empty when the resource is first created.
// The Order controller will populate this field when the Order is first processed.
// This field will be immutable after it is initially set.
URL string
// FinalizeURL of the Order.
// This is used to obtain certificates for this order once it has been completed.
FinalizeURL string
// Certificate is a copy of the PEM encoded certificate for this Order.
// This field will be populated after the order has been successfully
// finalized with the ACME server, and the order has transitioned to the
// 'valid' state.
Certificate []byte
// State contains the current state of this Order resource.
// States 'success' and 'expired' are 'final'
State State
// Reason optionally provides more information about a why the order is in
// the current state.
Reason string
// Authorizations contains data returned from the ACME server on what
// authorizations must be completed in order to validate the DNS names
// specified on the Order.
Authorizations []ACMEAuthorization
// FailureTime stores the time that this order failed.
// This is used to influence garbage collection and back-off.
FailureTime *metav1.Time
}
// ACMEAuthorization contains data returned from the ACME server on an
// authorization that must be completed in order validate a DNS name on an ACME
// Order resource.
type ACMEAuthorization struct {
// URL is the URL of the Authorization that must be completed
URL string
// Identifier is the DNS name to be validated as part of this authorization
Identifier string
// Wildcard will be true if this authorization is for a wildcard DNS name.
// If this is true, the identifier will be the *non-wildcard* version of
// the DNS name.
// For example, if '*.example.com' is the DNS name being validated, this
// field will be 'true' and the 'identifier' field will be 'example.com'.
Wildcard *bool
// InitialState is the initial state of the ACME authorization when first
// fetched from the ACME server.
// If an Authorization is already 'valid', the Order controller will not
// create a Challenge resource for the authorization. This will occur when
// working with an ACME server that enables 'authz reuse' (such as Let's
// Encrypt's production endpoint).
// If not set and 'identifier' is set, the state is assumed to be pending
// and a Challenge will be created.
// +optional
InitialState State
// Challenges specifies the challenge types offered by the ACME server.
// One of these challenge types will be selected when validating the DNS
// name and an appropriate Challenge resource will be created to perform
// the ACME challenge process.
Challenges []ACMEChallenge
}
// ACMEChallenge specifies a challenge offered by the ACME server for an Order.
// An appropriate Challenge resource can be created to perform the ACME
// challenge process.
type ACMEChallenge struct {
// URL is the URL of this challenge. It can be used to retrieve additional
// metadata about the Challenge from the ACME server.
URL string
// Token is the token that must be presented for this challenge.
// This is used to compute the 'key' that must also be presented.
Token string
// Type is the type of challenge being offered, e.g. 'http-01', 'dns-01',
// 'tls-sni-01', etc.
// This is the raw value retrieved from the ACME server.
// Only 'http-01' and 'dns-01' are supported by cert-manager, other values
// will be ignored.
Type string
}
// State represents the state of an ACME resource, such as an Order.
// The possible options here map to the corresponding values in the
// ACME specification.
// Full details of these values can be found here: https://tools.ietf.org/html/draft-ietf-acme-acme-15#section-7.1.6
// Clients utilising this type must also gracefully handle unknown
// values, as the contents of this enumeration may be added to over time.
type State string
const (
// Unknown is not a real state as part of the ACME spec.
// It is used to represent an unrecognised value.
Unknown State = ""
// Valid signifies that an ACME resource is in a valid state.
// If an order is 'valid', it has been finalized with the ACME server and
// the certificate can be retrieved from the ACME server using the
// certificate URL stored in the Order's status subresource.
// This is a final state.
Valid State = "valid"
// Ready signifies that an ACME resource is in a ready state.
// If an order is 'ready', all of its challenges have been completed
// successfully and the order is ready to be finalized.
// Once finalized, it will transition to the Valid state.
// This is a transient state.
Ready State = "ready"
// Pending signifies that an ACME resource is still pending and is not yet ready.
// If an Order is marked 'Pending', the validations for that Order are still in progress.
// This is a transient state.
Pending State = "pending"
// Processing signifies that an ACME resource is being processed by the server.
// If an Order is marked 'Processing', the validations for that Order are currently being processed.
// This is a transient state.
Processing State = "processing"
// Invalid signifies that an ACME resource is invalid for some reason.
// If an Order is marked 'invalid', one of its validations may have failed for some reason.
// This is a final state.
Invalid State = "invalid"
// Expired signifies that an ACME resource has expired.
// If an Order is marked 'Expired', one of its validations, or the Order itself, may have expired.
// This is a final state.
Expired State = "expired"
// Errored signifies that the ACME resource has errored for some reason.
// This is a catch-all state, and is used for marking internal cert-manager
// errors such as validation failures.
// This is a final state.
Errored State = "errored"
)
| 1 | 23,453 | I don't think the Internal API types need the json annotations. They are never serialised, right? | jetstack-cert-manager | go |
@@ -45,6 +45,7 @@ type apiClient interface {
GetApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.GetApplicationMostRecentDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.GetApplicationMostRecentDeploymentResponse, error)
CreateDeployment(ctx context.Context, in *pipedservice.CreateDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.CreateDeploymentResponse, error)
ReportApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.ReportApplicationMostRecentDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationMostRecentDeploymentResponse, error)
+ ReportDeploymentCompleted(ctx context.Context, req *pipedservice.ReportDeploymentCompletedRequest, opts ...grpc.CallOption) (*pipedservice.ReportDeploymentCompletedResponse, error)
}
type gitClient interface { | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package trigger provides a piped component
// that detects the applications that should be synced (by a new commit, a sync command or configuration drift)
// and then sends a request to the control-plane to create a new Deployment.
package trigger
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
"google.golang.org/grpc"
"github.com/pipe-cd/pipe/pkg/app/api/service/pipedservice"
"github.com/pipe-cd/pipe/pkg/cache/memorycache"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/git"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
commandCheckInterval = 10 * time.Second
defaultLastTriggeredCommitCacheSize = 500
)
const (
triggeredDeploymentIDKey = "TriggeredDeploymentID"
)
type apiClient interface {
GetApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.GetApplicationMostRecentDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.GetApplicationMostRecentDeploymentResponse, error)
CreateDeployment(ctx context.Context, in *pipedservice.CreateDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.CreateDeploymentResponse, error)
ReportApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.ReportApplicationMostRecentDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationMostRecentDeploymentResponse, error)
}
type gitClient interface {
Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error)
}
type applicationLister interface {
Get(id string) (*model.Application, bool)
List() []*model.Application
}
type commandLister interface {
ListApplicationCommands() []model.ReportableCommand
}
type environmentLister interface {
Get(ctx context.Context, id string) (*model.Environment, error)
}
type notifier interface {
Notify(event model.NotificationEvent)
}
type Trigger struct {
apiClient apiClient
gitClient gitClient
applicationLister applicationLister
commandLister commandLister
environmentLister environmentLister
notifier notifier
config *config.PipedSpec
commitStore *lastTriggeredCommitStore
gitRepos map[string]git.Repo
gracePeriod time.Duration
logger *zap.Logger
}
// NewTrigger creates a new instance for Trigger.
func NewTrigger(
apiClient apiClient,
gitClient gitClient,
appLister applicationLister,
commandLister commandLister,
environmentLister environmentLister,
notifier notifier,
cfg *config.PipedSpec,
gracePeriod time.Duration,
logger *zap.Logger,
) (*Trigger, error) {
cache, err := memorycache.NewLRUCache(defaultLastTriggeredCommitCacheSize)
if err != nil {
return nil, err
}
commitStore := &lastTriggeredCommitStore{
apiClient: apiClient,
cache: cache,
}
t := &Trigger{
apiClient: apiClient,
gitClient: gitClient,
applicationLister: appLister,
commandLister: commandLister,
environmentLister: environmentLister,
notifier: notifier,
config: cfg,
commitStore: commitStore,
gitRepos: make(map[string]git.Repo, len(cfg.Repositories)),
gracePeriod: gracePeriod,
logger: logger.Named("trigger"),
}
return t, nil
}
// Run starts running Trigger until the specified context has done.
// This also waits for its cleaning up before returning.
func (t *Trigger) Run(ctx context.Context) error {
t.logger.Info("start running deployment trigger")
// Pre-clone to cache the registered git repositories.
t.gitRepos = make(map[string]git.Repo, len(t.config.Repositories))
for _, r := range t.config.Repositories {
repo, err := t.gitClient.Clone(ctx, r.RepoID, r.Remote, r.Branch, "")
if err != nil {
t.logger.Error("failed to clone repository",
zap.String("repo-id", r.RepoID),
zap.Error(err),
)
return err
}
t.gitRepos[r.RepoID] = repo
}
commitTicker := time.NewTicker(time.Duration(t.config.SyncInterval))
defer commitTicker.Stop()
commandTicker := time.NewTicker(commandCheckInterval)
defer commandTicker.Stop()
L:
for {
select {
case <-commandTicker.C:
t.checkNewCommands(ctx)
case <-commitTicker.C:
t.checkNewCommits(ctx)
case <-ctx.Done():
break L
}
}
t.logger.Info("deployment trigger has been stopped")
return nil
}
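// GetLastTriggeredCommitGetter returns the store that tracks the last
// triggered commit of each application.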
func (t *Trigger) GetLastTriggeredCommitGetter() LastTriggeredCommitGetter {
return t.commitStore
}
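// checkNewCommands fetches all pending SyncApplication commands,
// triggers a deployment for each of them and reports the command result.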
func (t *Trigger) checkNewCommands(ctx context.Context) error {
commands := t.commandLister.ListApplicationCommands()
for _, cmd := range commands {
syncCmd := cmd.GetSyncApplication()
if syncCmd == nil {
continue
}
app, ok := t.applicationLister.Get(syncCmd.ApplicationId)
if !ok {
t.logger.Warn("detected an AppSync command for an unregistered application",
zap.String("command", cmd.Id),
zap.String("app-id", syncCmd.ApplicationId),
zap.String("commander", cmd.Commander),
)
continue
}
d, err := t.syncApplication(ctx, app, cmd.Commander, syncCmd.SyncStrategy)
if err != nil {
t.logger.Error("failed to sync application",
zap.String("app-id", app.Id),
zap.Error(err),
)
if err := cmd.Report(ctx, model.CommandStatus_COMMAND_FAILED, nil, nil); err != nil {
t.logger.Error("failed to report command status", zap.Error(err))
}
continue
}
metadata := map[string]string{
triggeredDeploymentIDKey: d.Id,
}
if err := cmd.Report(ctx, model.CommandStatus_COMMAND_SUCCEEDED, metadata, nil); err != nil {
t.logger.Error("failed to report command status", zap.Error(err))
}
}
return nil
}
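// checkNewCommits updates all registered repositories to their latest commit
// and triggers a deployment for every application affected by a new commit.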
func (t *Trigger) checkNewCommits(ctx context.Context) error {
if len(t.gitRepos) == 0 {
t.logger.Info("no repositories were configured for this piped")
return nil
}
// List all applications that should be handled by this piped
// and then group them by repository.
var applications = t.listApplications()
// ENHANCEMENT: We may want to apply a worker model here to run them concurrently.
for repoID, apps := range applications {
gitRepo, branch, headCommit, err := t.updateRepoToLatest(ctx, repoID)
if err != nil {
continue
}
d := NewDeterminer(gitRepo, headCommit.Hash, t.commitStore, t.logger)
for _, app := range apps {
shouldTrigger, err := d.ShouldTrigger(ctx, app)
if err != nil {
t.logger.Error(fmt.Sprintf("failed to check application: %s", app.Id), zap.Error(err))
continue
}
if !shouldTrigger {
t.commitStore.Put(app.Id, headCommit.Hash)
continue
}
// Build deployment model and send a request to API to create a new deployment.
t.logger.Info("application should be synced because of the new commit")
if _, err := t.triggerDeployment(ctx, app, branch, headCommit, "", model.SyncStrategy_AUTO); err != nil {
t.logger.Error(fmt.Sprintf("failed to trigger application: %s", app.Id), zap.Error(err))
}
t.commitStore.Put(app.Id, headCommit.Hash)
}
}
return nil
}
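// syncApplication updates the application's repository to its latest state
// and then triggers a new deployment from the head commit.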
func (t *Trigger) syncApplication(ctx context.Context, app *model.Application, commander string, syncStrategy model.SyncStrategy) (*model.Deployment, error) {
_, branch, headCommit, err := t.updateRepoToLatest(ctx, app.GitPath.Repo.Id)
if err != nil {
return nil, err
}
// Build deployment model and send a request to API to create a new deployment.
t.logger.Info(fmt.Sprintf("application %s will be synced because of a sync command", app.Id),
zap.String("head-commit", headCommit.Hash),
)
d, err := t.triggerDeployment(ctx, app, branch, headCommit, commander, syncStrategy)
if err != nil {
return nil, err
}
t.commitStore.Put(app.Id, headCommit.Hash)
return d, nil
}
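// updateRepoToLatest pulls the latest state of the given repository
// and returns the cloned repo, its branch and the head commit.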
func (t *Trigger) updateRepoToLatest(ctx context.Context, repoID string) (repo git.Repo, branch string, headCommit git.Commit, err error) {
var ok bool
// Find the application repo from pre-loaded ones.
repo, ok = t.gitRepos[repoID]
if !ok {
t.logger.Warn("detected some applications binding with a non existent repository", zap.String("repo-id", repoID))
err = fmt.Errorf("missing repository")
return
}
branch = repo.GetClonedBranch()
// Fetch to update the repository to the latest state of its branch.
if err = repo.Pull(ctx, branch); err != nil {
if ctx.Err() != context.Canceled {
t.logger.Error("failed to update repository branch",
zap.String("repo-id", repoID),
zap.Error(err),
)
}
return
}
// Get the head commit of the repository.
headCommit, err = repo.GetLatestCommit(ctx)
if err != nil {
// TODO: Find a better way to skip the CANCELLED error log while shutting down.
if ctx.Err() != context.Canceled {
t.logger.Error("failed to get head commit hash",
zap.String("repo-id", repoID),
zap.Error(err),
)
}
return
}
return
}
// listApplications retrieves all applications that should be handled by this piped
// and then groups them by repoID.
func (t *Trigger) listApplications() map[string][]*model.Application {
var (
apps = t.applicationLister.List()
m = make(map[string][]*model.Application)
)
for _, app := range apps {
repoId := app.GitPath.Repo.Id
if _, ok := m[repoId]; !ok {
m[repoId] = []*model.Application{app}
} else {
m[repoId] = append(m[repoId], app)
}
}
return m
}
| 1 | 21,918 | Maybe this was added incidentally. We don't need this function in the interface. | pipe-cd-pipe | go |
@@ -18,8 +18,15 @@ func To(fs http.FileSystem, r *http.Request, to string, replacer httpserver.Repl
// try each rewrite paths
t := ""
+ query := ""
for _, v := range tos {
- t = path.Clean(replacer.Replace(v))
+ t = replacer.Replace(v)
+ tparts := strings.Split(t, "?")
+ t = path.Clean(tparts[0])
+
+ if len(tparts) > 1 {
+ query = tparts[1]
+ }
// add trailing slash for directories, if present
if strings.HasSuffix(v, "/") && !strings.HasSuffix(t, "/") { | 1 | package rewrite
import (
"log"
"net/http"
"net/url"
"path"
"strings"
"github.com/mholt/caddy/caddyhttp/httpserver"
)
// To attempts a rewrite. It rewrites to the first valid path,
// or to the last path if none of the paths are valid.
// It returns RewriteDone if the rewrite succeeds and RewriteIgnored otherwise.
func To(fs http.FileSystem, r *http.Request, to string, replacer httpserver.Replacer) Result {
tos := strings.Fields(to)
// try each rewrite paths
t := ""
for _, v := range tos {
t = path.Clean(replacer.Replace(v))
// add trailing slash for directories, if present
if strings.HasSuffix(v, "/") && !strings.HasSuffix(t, "/") {
t += "/"
}
// validate file
if isValidFile(fs, t) {
break
}
}
// validate resulting path
u, err := url.Parse(t)
if err != nil {
// Let the user know we got here. Rewrite is expected but
// the resulting url is invalid.
log.Printf("[ERROR] rewrite: resulting path '%v' is invalid. error: %v", t, err)
return RewriteIgnored
}
// take note of this rewrite for internal use by fastcgi
// all we need is the URI, not full URL
r.Header.Set(headerFieldName, r.URL.RequestURI())
// perform rewrite
r.URL.Path = u.Path
if u.RawQuery != "" {
// overwrite query string if present
r.URL.RawQuery = u.RawQuery
}
if u.Fragment != "" {
// overwrite fragment if present
r.URL.Fragment = u.Fragment
}
return RewriteDone
}
// isValidFile checks if file exists on the filesystem.
// If the file ends with `/`, it is validated as a directory.
func isValidFile(fs http.FileSystem, file string) bool {
if fs == nil {
return false
}
f, err := fs.Open(file)
if err != nil {
return false
}
defer f.Close()
stat, err := f.Stat()
if err != nil {
return false
}
// directory
if strings.HasSuffix(file, "/") {
return stat.IsDir()
}
// file
return !stat.IsDir()
}
| 1 | 8,473 | Is this a safe/reliable way to split the URL? | caddyserver-caddy | go |
@@ -350,6 +350,7 @@ public class JdbcProjectLoader extends AbstractJdbcLoader implements
public void uploadProjectFile(Project project, int version, String filetype,
String filename, File localFile, String uploader)
throws ProjectManagerException {
+ long startMs = System.currentTimeMillis();
logger.info("Uploading to " + project.getName() + " version:" + version
+ " file:" + filename);
Connection connection = getConnection(); | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.project;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.dbutils.DbUtils;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.ResultSetHandler;
import org.apache.commons.io.IOUtils;
import org.apache.log4j.Logger;
import azkaban.database.AbstractJdbcLoader;
import azkaban.flow.Flow;
import azkaban.project.ProjectLogEvent.EventType;
import azkaban.user.Permission;
import azkaban.user.User;
import azkaban.utils.GZIPUtils;
import azkaban.utils.JSONUtils;
import azkaban.utils.Md5Hasher;
import azkaban.utils.Pair;
import azkaban.utils.Props;
import azkaban.utils.PropsUtils;
import azkaban.utils.Triple;
public class JdbcProjectLoader extends AbstractJdbcLoader implements
ProjectLoader {
private static final Logger logger = Logger
.getLogger(JdbcProjectLoader.class);
  private static final int CHUNK_SIZE = 1024 * 1024 * 10; // 10 MB per chunk when storing project files in the db
private File tempDir;
private EncodingType defaultEncodingType = EncodingType.GZIP;
public JdbcProjectLoader(Props props) {
super(props);
tempDir = new File(props.getString("project.temp.dir", "temp"));
if (!tempDir.exists()) {
tempDir.mkdirs();
}
}
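  /**
   * Fetches all active projects from the db, including their user and group permissions.
   */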
@Override
public List<Project> fetchAllActiveProjects() throws ProjectManagerException {
Connection connection = getConnection();
List<Project> projects = null;
try {
projects = fetchAllActiveProjects(connection);
} finally {
DbUtils.closeQuietly(connection);
}
return projects;
}
private List<Project> fetchAllActiveProjects(Connection connection)
throws ProjectManagerException {
QueryRunner runner = new QueryRunner();
ProjectResultHandler handler = new ProjectResultHandler();
List<Project> projects = null;
try {
projects =
runner.query(connection,
ProjectResultHandler.SELECT_ALL_ACTIVE_PROJECTS, handler);
for (Project project : projects) {
List<Triple<String, Boolean, Permission>> permissions =
fetchPermissionsForProject(connection, project);
for (Triple<String, Boolean, Permission> entry : permissions) {
if (entry.getSecond()) {
project.setGroupPermission(entry.getFirst(), entry.getThird());
} else {
project.setUserPermission(entry.getFirst(), entry.getThird());
}
}
}
} catch (SQLException e) {
throw new ProjectManagerException("Error retrieving all projects", e);
} finally {
DbUtils.closeQuietly(connection);
}
return projects;
}
@Override
public Project fetchProjectById(int id) throws ProjectManagerException {
Connection connection = getConnection();
Project project = null;
try {
project = fetchProjectById(connection, id);
} finally {
DbUtils.closeQuietly(connection);
}
return project;
}
private Project fetchProjectById(Connection connection, int id)
throws ProjectManagerException {
QueryRunner runner = new QueryRunner();
// Fetch the project
Project project = null;
ProjectResultHandler handler = new ProjectResultHandler();
try {
List<Project> projects =
runner.query(connection, ProjectResultHandler.SELECT_PROJECT_BY_ID,
handler, id);
if (projects.isEmpty()) {
throw new ProjectManagerException("No project with id " + id
+ " exists in db.");
}
project = projects.get(0);
} catch (SQLException e) {
logger.error(ProjectResultHandler.SELECT_PROJECT_BY_ID + " failed.");
throw new ProjectManagerException(
"Query for existing project failed. Project " + id, e);
}
// Fetch the user permissions
List<Triple<String, Boolean, Permission>> permissions =
fetchPermissionsForProject(connection, project);
for (Triple<String, Boolean, Permission> perm : permissions) {
if (perm.getThird().toFlags() != 0) {
if (perm.getSecond()) {
project.setGroupPermission(perm.getFirst(), perm.getThird());
} else {
project.setUserPermission(perm.getFirst(), perm.getThird());
}
}
}
return project;
}
/**
* Fetch first project with a given name {@inheritDoc}
*
* @see azkaban.project.ProjectLoader#fetchProjectByName(java.lang.String)
*/
@Override
public Project fetchProjectByName(String name)
throws ProjectManagerException {
Connection connection = getConnection();
Project project = null;
try {
project = fetchProjectByName(connection, name);
} finally {
DbUtils.closeQuietly(connection);
}
return project;
}
private Project fetchProjectByName(Connection connection, String name)
throws ProjectManagerException {
QueryRunner runner = new QueryRunner();
// Fetch the project
Project project = null;
ProjectResultHandler handler = new ProjectResultHandler();
try {
List<Project> projects =
runner.query(connection,
ProjectResultHandler.SELECT_PROJECT_BY_NAME, handler, name);
if (projects.isEmpty()) {
throw new ProjectManagerException(
"No project with name " + name + " exists in db.");
}
project = projects.get(0);
} catch (SQLException e) {
logger.error(ProjectResultHandler.SELECT_PROJECT_BY_NAME
+ " failed.");
throw new ProjectManagerException(
"Query for existing project failed. Project " + name, e);
}
// Fetch the user permissions
List<Triple<String, Boolean, Permission>> permissions =
fetchPermissionsForProject(connection, project);
for (Triple<String, Boolean, Permission> perm : permissions) {
if (perm.getThird().toFlags() != 0) {
if (perm.getSecond()) {
project
.setGroupPermission(perm.getFirst(), perm.getThird());
} else {
project.setUserPermission(perm.getFirst(), perm.getThird());
}
}
}
return project;
}
private List<Triple<String, Boolean, Permission>> fetchPermissionsForProject(
Connection connection, Project project) throws ProjectManagerException {
ProjectPermissionsResultHandler permHander =
new ProjectPermissionsResultHandler();
QueryRunner runner = new QueryRunner();
List<Triple<String, Boolean, Permission>> permissions = null;
try {
permissions =
runner.query(connection,
ProjectPermissionsResultHandler.SELECT_PROJECT_PERMISSION,
permHander, project.getId());
} catch (SQLException e) {
throw new ProjectManagerException("Query for permissions for "
+ project.getName() + " failed.", e);
}
return permissions;
}
/**
* Creates a Project in the db.
*
* It will throw an exception if it finds an active project of the same name,
* or the SQL fails
*/
@Override
public Project createNewProject(String name, String description, User creator)
throws ProjectManagerException {
Connection connection = getConnection();
Project project;
try {
// No need to commit, since createNewProject should commit.
project = createNewProject(connection, name, description, creator);
} finally {
DbUtils.closeQuietly(connection);
}
return project;
}
private synchronized Project createNewProject(Connection connection,
String name, String description, User creator)
throws ProjectManagerException {
QueryRunner runner = new QueryRunner();
ProjectResultHandler handler = new ProjectResultHandler();
// See if it exists first.
try {
List<Project> project =
runner
.query(connection,
ProjectResultHandler.SELECT_ACTIVE_PROJECT_BY_NAME, handler,
name);
if (!project.isEmpty()) {
throw new ProjectManagerException("Active project with name " + name
+ " already exists in db.");
}
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException(
"Checking for existing project failed. " + name, e);
}
final String INSERT_PROJECT =
"INSERT INTO projects ( name, active, modified_time, create_time, version, last_modified_by, description, enc_type, settings_blob) values (?,?,?,?,?,?,?,?,?)";
// Insert project
try {
long time = System.currentTimeMillis();
int i =
runner.update(connection, INSERT_PROJECT, name, true, time, time,
null, creator.getUserId(), description,
defaultEncodingType.getNumVal(), null);
if (i == 0) {
throw new ProjectManagerException("No projects have been inserted.");
}
connection.commit();
} catch (SQLException e) {
logger.error(INSERT_PROJECT + " failed.");
try {
connection.rollback();
} catch (SQLException e1) {
e1.printStackTrace();
}
throw new ProjectManagerException(
"Insert project for existing project failed. " + name, e);
}
// Do another query to grab and return the project.
Project project = null;
try {
List<Project> projects =
runner
.query(connection,
ProjectResultHandler.SELECT_ACTIVE_PROJECT_BY_NAME, handler,
name);
if (projects.isEmpty()) {
throw new ProjectManagerException("No active project with name " + name
+ " exists in db.");
} else if (projects.size() > 1) {
throw new ProjectManagerException("More than one active project "
+ name);
}
project = projects.get(0);
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException(
"Checking for existing project failed. " + name, e);
}
return project;
}
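  /**
   * Uploads a project file to the db in chunks and records the new project version.
   */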
@Override
public void uploadProjectFile(Project project, int version, String filetype,
String filename, File localFile, String uploader)
throws ProjectManagerException {
logger.info("Uploading to " + project.getName() + " version:" + version
+ " file:" + filename);
Connection connection = getConnection();
try {
uploadProjectFile(connection, project, version, filetype, filename,
localFile, uploader);
connection.commit();
logger.info("Commiting upload " + localFile.getName());
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException("Error getting DB connection.", e);
} finally {
DbUtils.closeQuietly(connection);
}
}
private void uploadProjectFile(Connection connection, Project project,
int version, String filetype, String filename, File localFile,
String uploader) throws ProjectManagerException {
QueryRunner runner = new QueryRunner();
long updateTime = System.currentTimeMillis();
logger.info("Creating message digest for upload " + localFile.getName());
byte[] md5 = null;
try {
md5 = Md5Hasher.md5Hash(localFile);
} catch (IOException e) {
throw new ProjectManagerException("Error getting md5 hash.", e);
}
logger.info("Md5 hash created");
    /**
     * Insert a new version record into TABLE project_versions before uploading files.
     *
     * The reason for this operation:
     * When a chunking error happens on the remote mysql server, incomplete file data remains
     * in the DB and an SQL exception is thrown. If we don't insert this record before uploading the file,
     * the SQL exception prevents AZ from creating the new version record in TABLE project_versions.
     * However, TABLE project_files still keeps the incomplete chunks, which causes trouble
     * when uploading a new file: since the version in TABLE project_versions is still the old one,
     * mysql will stop inserting the new file's chunks into the db.
     *
     * Why this operation is safe:
     * When AZ uploads a new zip file, it always fetches the latest version proj_v from TABLE project_versions;
     * proj_v+1 will be used as the new version for the files being uploaded.
     *
     * Assume a chunking error happens on day 1. proj_v is created for this bad file (old file version + 1).
     * When we upload a new project zip on day 2, the new file will use the next version (proj_v + 1).
     * When the file upload completes, AZ cleans all old chunks in the DB afterward.
     */
final String INSERT_PROJECT_VERSION =
"INSERT INTO project_versions (project_id, version, upload_time, uploader, file_type, file_name, md5, num_chunks) values (?,?,?,?,?,?,?,?)";
try {
/**
* As we don't know the num_chunks before uploading the file, we initialize it to 0,
* and will update it after uploading completes.
*/
runner.update(connection, INSERT_PROJECT_VERSION, project.getId(),
version, updateTime, uploader, filetype, filename, md5, 0);
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException("Error initializing project version "
+ project.getName(), e);
}
// Really... I doubt we'll get a > 2gig file. So int casting it is!
    byte[] buffer = new byte[CHUNK_SIZE];
final String INSERT_PROJECT_FILES =
"INSERT INTO project_files (project_id, version, chunk, size, file) values (?,?,?,?,?)";
BufferedInputStream bufferedStream = null;
int chunk = 0;
try {
bufferedStream = new BufferedInputStream(new FileInputStream(localFile));
int size = bufferedStream.read(buffer);
while (size >= 0) {
logger.info("Read bytes for " + filename + " size:" + size);
byte[] buf = buffer;
if (size < buffer.length) {
buf = Arrays.copyOfRange(buffer, 0, size);
}
try {
logger.info("Running update for " + filename + " chunk " + chunk);
runner.update(connection, INSERT_PROJECT_FILES, project.getId(),
version, chunk, size, buf);
        /**
         * We force az to commit to the db after uploading every single chunk,
         * in order to reduce the transaction duration and conserve sql server resources.
         *
         * If the file to be uploaded is very large and we don't commit after every single chunk,
         * the remote mysql server will run into memory trouble.
         */
connection.commit();
logger.info("Finished update for " + filename + " chunk " + chunk);
} catch (SQLException e) {
throw new ProjectManagerException("Error Chunking during uploading files to db...");
}
++chunk;
size = bufferedStream.read(buffer);
}
} catch (IOException e) {
throw new ProjectManagerException("Error chunking file " + filename);
} finally {
IOUtils.closeQuietly(bufferedStream);
}
/**
     * We update the actual num_chunks value in the db here.
*/
final String UPDATE_PROJECT_NUM_CHUNKS =
"UPDATE project_versions SET num_chunks=? WHERE project_id=? AND version=?";
try {
runner.update(connection, UPDATE_PROJECT_NUM_CHUNKS, chunk, project.getId(), version);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException(
"Error updating project " + project.getId() + " : chunk_num "
+ chunk, e);
}
}
@Override
public ProjectFileHandler getUploadedFile(Project project, int version)
throws ProjectManagerException {
logger.info("Retrieving to " + project.getName() + " version:" + version);
Connection connection = getConnection();
ProjectFileHandler handler = null;
try {
handler = getUploadedFile(connection, project.getId(), version);
} finally {
DbUtils.closeQuietly(connection);
}
return handler;
}
@Override
public ProjectFileHandler getUploadedFile(int projectId, int version)
throws ProjectManagerException {
logger.info("Retrieving to " + projectId + " version:" + version);
Connection connection = getConnection();
ProjectFileHandler handler = null;
try {
handler = getUploadedFile(connection, projectId, version);
} finally {
DbUtils.closeQuietly(connection);
}
return handler;
}
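  /**
   * Reassembles the chunked file data from the db into a local temp file
   * and verifies its md5 hash before returning the handler.
   */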
private ProjectFileHandler getUploadedFile(Connection connection,
int projectId, int version) throws ProjectManagerException {
QueryRunner runner = new QueryRunner();
ProjectVersionResultHandler pfHandler = new ProjectVersionResultHandler();
List<ProjectFileHandler> projectFiles = null;
try {
projectFiles =
runner.query(connection,
ProjectVersionResultHandler.SELECT_PROJECT_VERSION, pfHandler,
projectId, version);
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException(
"Query for uploaded file for project id " + projectId + " failed.", e);
}
if (projectFiles == null || projectFiles.isEmpty()) {
return null;
}
ProjectFileHandler projHandler = projectFiles.get(0);
int numChunks = projHandler.getNumChunks();
BufferedOutputStream bStream = null;
File file = null;
try {
try {
file =
File.createTempFile(projHandler.getFileName(),
String.valueOf(version), tempDir);
bStream = new BufferedOutputStream(new FileOutputStream(file));
} catch (IOException e) {
throw new ProjectManagerException(
"Error creating temp file for stream.");
}
int collect = 5;
int fromChunk = 0;
int toChunk = collect;
do {
ProjectFileChunkResultHandler chunkHandler =
new ProjectFileChunkResultHandler();
List<byte[]> data = null;
try {
data =
runner.query(connection,
ProjectFileChunkResultHandler.SELECT_PROJECT_CHUNKS_FILE,
chunkHandler, projectId, version, fromChunk, toChunk);
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException("Query for uploaded file for "
+ projectId + " failed.", e);
}
try {
for (byte[] d : data) {
bStream.write(d);
}
} catch (IOException e) {
throw new ProjectManagerException("Error writing file", e);
}
// Add all the bytes to the stream.
fromChunk += collect;
toChunk += collect;
} while (fromChunk <= numChunks);
} finally {
IOUtils.closeQuietly(bStream);
}
// Check md5.
byte[] md5 = null;
try {
md5 = Md5Hasher.md5Hash(file);
} catch (IOException e) {
throw new ProjectManagerException("Error getting md5 hash.", e);
}
if (Arrays.equals(projHandler.getMd5Hash(), md5)) {
logger.info("Md5 Hash is valid");
} else {
throw new ProjectManagerException("Md5 Hash failed on retrieval of file");
}
projHandler.setLocalFile(file);
return projHandler;
}
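  /**
   * Points the project at the given uploaded version and records who changed it and when.
   */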
@Override
public void changeProjectVersion(Project project, int version, String user)
throws ProjectManagerException {
long timestamp = System.currentTimeMillis();
QueryRunner runner = createQueryRunner();
try {
final String UPDATE_PROJECT_VERSION =
"UPDATE projects SET version=?,modified_time=?,last_modified_by=? WHERE id=?";
runner.update(UPDATE_PROJECT_VERSION, version, timestamp, user,
project.getId());
project.setVersion(version);
project.setLastModifiedTimestamp(timestamp);
project.setLastModifiedUser(user);
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException(
"Error updating switching project version " + project.getName(), e);
}
}
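  /**
   * Inserts or updates a user/group permission for the project, using
   * ON DUPLICATE KEY UPDATE where supported and MERGE otherwise.
   */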
@Override
public void updatePermission(Project project, String name, Permission perm,
boolean isGroup) throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
if (this.allowsOnDuplicateKey()) {
long updateTime = System.currentTimeMillis();
final String INSERT_PROJECT_PERMISSION =
"INSERT INTO project_permissions (project_id, modified_time, name, permissions, isGroup) values (?,?,?,?,?)"
+ "ON DUPLICATE KEY UPDATE modified_time = VALUES(modified_time), permissions = VALUES(permissions)";
try {
runner.update(INSERT_PROJECT_PERMISSION, project.getId(), updateTime,
name, perm.toFlags(), isGroup);
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException("Error updating project "
+ project.getName() + " permissions for " + name, e);
}
} else {
long updateTime = System.currentTimeMillis();
final String MERGE_PROJECT_PERMISSION =
"MERGE INTO project_permissions (project_id, modified_time, name, permissions, isGroup) KEY (project_id, name) values (?,?,?,?,?)";
try {
runner.update(MERGE_PROJECT_PERMISSION, project.getId(), updateTime,
name, perm.toFlags(), isGroup);
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException("Error updating project "
+ project.getName() + " permissions for " + name, e);
}
}
if (isGroup) {
project.setGroupPermission(name, perm);
} else {
project.setUserPermission(name, perm);
}
}
@Override
public void updateProjectSettings(Project project)
throws ProjectManagerException {
Connection connection = getConnection();
try {
updateProjectSettings(connection, project, defaultEncodingType);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException("Error updating project settings", e);
} finally {
DbUtils.closeQuietly(connection);
}
}
private void updateProjectSettings(Connection connection, Project project,
EncodingType encType) throws ProjectManagerException {
QueryRunner runner = new QueryRunner();
final String UPDATE_PROJECT_SETTINGS =
"UPDATE projects SET enc_type=?, settings_blob=? WHERE id=?";
String json = JSONUtils.toJSON(project.toObject());
byte[] data = null;
try {
byte[] stringData = json.getBytes("UTF-8");
data = stringData;
if (encType == EncodingType.GZIP) {
data = GZIPUtils.gzipBytes(stringData);
}
logger.debug("NumChars: " + json.length() + " UTF-8:" + stringData.length
+ " Gzip:" + data.length);
} catch (IOException e) {
throw new ProjectManagerException("Failed to encode. ", e);
}
try {
runner.update(connection, UPDATE_PROJECT_SETTINGS, encType.getNumVal(),
data, project.getId());
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException("Error updating project "
+ project.getName() + " version " + project.getVersion(), e);
}
}
@Override
public void removePermission(Project project, String name, boolean isGroup)
throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
final String DELETE_PROJECT_PERMISSION =
"DELETE FROM project_permissions WHERE project_id=? AND name=? AND isGroup=?";
try {
runner.update(DELETE_PROJECT_PERMISSION, project.getId(), name, isGroup);
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException("Error deleting project "
+ project.getName() + " permissions for " + name, e);
}
if (isGroup) {
project.removeGroupPermission(name);
} else {
project.removeUserPermission(name);
}
}
@Override
public List<Triple<String, Boolean, Permission>> getProjectPermissions(
int projectId) throws ProjectManagerException {
ProjectPermissionsResultHandler permHander =
new ProjectPermissionsResultHandler();
QueryRunner runner = createQueryRunner();
List<Triple<String, Boolean, Permission>> permissions = null;
try {
permissions =
runner.query(
ProjectPermissionsResultHandler.SELECT_PROJECT_PERMISSION,
permHander, projectId);
} catch (SQLException e) {
throw new ProjectManagerException("Query for permissions for "
+ projectId + " failed.", e);
}
return permissions;
}
@Override
public void removeProject(Project project, String user)
throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
long updateTime = System.currentTimeMillis();
final String UPDATE_INACTIVE_PROJECT =
"UPDATE projects SET active=false,modified_time=?,last_modified_by=? WHERE id=?";
try {
runner.update(UPDATE_INACTIVE_PROJECT, updateTime, user, project.getId());
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException("Error marking project "
+ project.getName() + " as inactive", e);
}
}
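  /**
   * Records a project event for auditing; returns false if the insert fails.
   */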
@Override
public boolean postEvent(Project project, EventType type, String user,
String message) {
QueryRunner runner = createQueryRunner();
final String INSERT_PROJECT_EVENTS =
"INSERT INTO project_events (project_id, event_type, event_time, username, message) values (?,?,?,?,?)";
long updateTime = System.currentTimeMillis();
try {
runner.update(INSERT_PROJECT_EVENTS, project.getId(), type.getNumVal(),
updateTime, user, message);
} catch (SQLException e) {
e.printStackTrace();
return false;
}
return true;
}
/**
* Get all the logs for a given project
*
* @param project
* @return
* @throws ProjectManagerException
*/
public List<ProjectLogEvent> getProjectEvents(Project project, int num,
int skip) throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
ProjectLogsResultHandler logHandler = new ProjectLogsResultHandler();
List<ProjectLogEvent> events = null;
try {
events =
runner.query(ProjectLogsResultHandler.SELECT_PROJECT_EVENTS_ORDER,
logHandler, project.getId(), num, skip);
} catch (SQLException e) {
logger.error(e);
}
return events;
}
@Override
public void updateDescription(Project project, String description, String user)
throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
final String UPDATE_PROJECT_DESCRIPTION =
"UPDATE projects SET description=?,modified_time=?,last_modified_by=? WHERE id=?";
long updateTime = System.currentTimeMillis();
try {
runner.update(UPDATE_PROJECT_DESCRIPTION, description, updateTime, user,
project.getId());
project.setDescription(description);
project.setLastModifiedTimestamp(updateTime);
project.setLastModifiedUser(user);
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException("Error marking project "
+ project.getName() + " as inactive", e);
}
}
@Override
public int getLatestProjectVersion(Project project)
throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
IntHander handler = new IntHander();
try {
return runner.query(IntHander.SELECT_LATEST_VERSION, handler,
project.getId());
} catch (SQLException e) {
logger.error(e);
throw new ProjectManagerException("Error marking project "
+ project.getName() + " as inactive", e);
}
}
@Override
public void uploadFlows(Project project, int version, Collection<Flow> flows)
throws ProjectManagerException {
// We do one at a time instead of batch... because well, the batch could be
// large.
logger.info("Uploading flows");
Connection connection = getConnection();
try {
for (Flow flow : flows) {
uploadFlow(connection, project, version, flow, defaultEncodingType);
}
connection.commit();
} catch (IOException e) {
throw new ProjectManagerException("Flow Upload failed.", e);
} catch (SQLException e) {
throw new ProjectManagerException("Flow Upload failed.", e);
} finally {
DbUtils.closeQuietly(connection);
}
}
@Override
public void uploadFlow(Project project, int version, Flow flow)
throws ProjectManagerException {
logger.info("Uploading flows");
Connection connection = getConnection();
try {
uploadFlow(connection, project, version, flow, defaultEncodingType);
connection.commit();
} catch (IOException e) {
throw new ProjectManagerException("Flow Upload failed.", e);
} catch (SQLException e) {
throw new ProjectManagerException("Flow Upload failed commit.", e);
} finally {
DbUtils.closeQuietly(connection);
}
}
@Override
public void updateFlow(Project project, int version, Flow flow)
throws ProjectManagerException {
logger.info("Uploading flows");
Connection connection = getConnection();
try {
QueryRunner runner = new QueryRunner();
String json = JSONUtils.toJSON(flow.toObject());
byte[] stringData = json.getBytes("UTF-8");
byte[] data = stringData;
if (defaultEncodingType == EncodingType.GZIP) {
data = GZIPUtils.gzipBytes(stringData);
}
logger.info("Flow upload " + flow.getId() + " is byte size "
+ data.length);
final String UPDATE_FLOW =
"UPDATE project_flows SET encoding_type=?,json=? WHERE project_id=? AND version=? AND flow_id=?";
try {
runner.update(connection, UPDATE_FLOW, defaultEncodingType.getNumVal(),
data, project.getId(), version, flow.getId());
} catch (SQLException e) {
e.printStackTrace();
throw new ProjectManagerException("Error inserting flow "
+ flow.getId(), e);
}
connection.commit();
} catch (IOException e) {
throw new ProjectManagerException("Flow Upload failed.", e);
} catch (SQLException e) {
throw new ProjectManagerException("Flow Upload failed commit.", e);
} finally {
DbUtils.closeQuietly(connection);
}
}
public EncodingType getDefaultEncodingType() {
return defaultEncodingType;
}
public void setDefaultEncodingType(EncodingType defaultEncodingType) {
this.defaultEncodingType = defaultEncodingType;
}
private void uploadFlow(Connection connection, Project project, int version,
Flow flow, EncodingType encType) throws ProjectManagerException,
IOException {
QueryRunner runner = new QueryRunner();
String json = JSONUtils.toJSON(flow.toObject());
byte[] stringData = json.getBytes("UTF-8");
byte[] data = stringData;
if (encType == EncodingType.GZIP) {
data = GZIPUtils.gzipBytes(stringData);
}
logger.info("Flow upload " + flow.getId() + " is byte size " + data.length);
final String INSERT_FLOW =
"INSERT INTO project_flows (project_id, version, flow_id, modified_time, encoding_type, json) values (?,?,?,?,?,?)";
try {
runner.update(connection, INSERT_FLOW, project.getId(), version,
flow.getId(), System.currentTimeMillis(), encType.getNumVal(), data);
} catch (SQLException e) {
throw new ProjectManagerException("Error inserting flow " + flow.getId(),
e);
}
}
@Override
public Flow fetchFlow(Project project, String flowId)
throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
ProjectFlowsResultHandler handler = new ProjectFlowsResultHandler();
try {
List<Flow> flows =
runner.query(ProjectFlowsResultHandler.SELECT_PROJECT_FLOW, handler,
project.getId(), project.getVersion(), flowId);
if (flows.isEmpty()) {
return null;
} else {
return flows.get(0);
}
} catch (SQLException e) {
throw new ProjectManagerException("Error fetching flow " + flowId, e);
}
}
@Override
public List<Flow> fetchAllProjectFlows(Project project)
throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
ProjectFlowsResultHandler handler = new ProjectFlowsResultHandler();
List<Flow> flows = null;
try {
flows =
runner.query(ProjectFlowsResultHandler.SELECT_ALL_PROJECT_FLOWS,
handler, project.getId(), project.getVersion());
} catch (SQLException e) {
throw new ProjectManagerException("Error fetching flows from project "
+ project.getName() + " version " + project.getVersion(), e);
}
return flows;
}
@Override
public void uploadProjectProperties(Project project, List<Props> properties)
throws ProjectManagerException {
Connection connection = getConnection();
try {
for (Props props : properties) {
uploadProjectProperty(connection, project, props.getSource(), props);
}
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException(
"Error uploading project property files", e);
} catch (IOException e) {
throw new ProjectManagerException(
"Error uploading project property files", e);
} finally {
DbUtils.closeQuietly(connection);
}
}
@Override
public void uploadProjectProperty(Project project, Props props)
throws ProjectManagerException {
Connection connection = getConnection();
try {
uploadProjectProperty(connection, project, props.getSource(), props);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException(
"Error uploading project property files", e);
} catch (IOException e) {
throw new ProjectManagerException(
"Error uploading project property file", e);
} finally {
DbUtils.closeQuietly(connection);
}
}
@Override
public void updateProjectProperty(Project project, Props props)
throws ProjectManagerException {
Connection connection = getConnection();
try {
updateProjectProperty(connection, project, props.getSource(), props);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException(
"Error uploading project property files", e);
} catch (IOException e) {
throw new ProjectManagerException(
"Error uploading project property file", e);
} finally {
DbUtils.closeQuietly(connection);
}
}
private void updateProjectProperty(Connection connection, Project project,
String name, Props props) throws ProjectManagerException, IOException {
QueryRunner runner = new QueryRunner();
final String UPDATE_PROPERTIES =
"UPDATE project_properties SET property=? WHERE project_id=? AND version=? AND name=?";
String propertyJSON = PropsUtils.toJSONString(props, true);
byte[] data = propertyJSON.getBytes("UTF-8");
if (defaultEncodingType == EncodingType.GZIP) {
data = GZIPUtils.gzipBytes(data);
}
try {
runner.update(connection, UPDATE_PROPERTIES, data, project.getId(),
project.getVersion(), name);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException("Error updating property "
+ project.getName() + " version " + project.getVersion(), e);
}
}
private void uploadProjectProperty(Connection connection, Project project,
String name, Props props) throws ProjectManagerException, IOException {
QueryRunner runner = new QueryRunner();
final String INSERT_PROPERTIES =
"INSERT INTO project_properties (project_id, version, name, modified_time, encoding_type, property) values (?,?,?,?,?,?)";
String propertyJSON = PropsUtils.toJSONString(props, true);
byte[] data = propertyJSON.getBytes("UTF-8");
if (defaultEncodingType == EncodingType.GZIP) {
data = GZIPUtils.gzipBytes(data);
}
try {
runner.update(connection, INSERT_PROPERTIES, project.getId(),
project.getVersion(), name, System.currentTimeMillis(),
defaultEncodingType.getNumVal(), data);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException("Error uploading project properties "
+ name + " into " + project.getName() + " version "
+ project.getVersion(), e);
}
}
@Override
public Props fetchProjectProperty(int projectId, int projectVer,
String propsName) throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
ProjectPropertiesResultsHandler handler =
new ProjectPropertiesResultsHandler();
try {
List<Pair<String, Props>> properties =
runner.query(ProjectPropertiesResultsHandler.SELECT_PROJECT_PROPERTY,
handler, projectId, projectVer, propsName);
if (properties == null || properties.isEmpty()) {
return null;
}
return properties.get(0).getSecond();
} catch (SQLException e) {
logger.error("Error fetching property " + propsName
+ " Project " + projectId + " version " + projectVer, e);
throw new ProjectManagerException("Error fetching property " + propsName,
e);
}
}
@Override
public Props fetchProjectProperty(Project project, String propsName)
throws ProjectManagerException {
// TODO: 11/23/16 call the other overloaded method fetchProjectProperty internally.
QueryRunner runner = createQueryRunner();
ProjectPropertiesResultsHandler handler =
new ProjectPropertiesResultsHandler();
try {
List<Pair<String, Props>> properties =
runner.query(ProjectPropertiesResultsHandler.SELECT_PROJECT_PROPERTY,
handler, project.getId(), project.getVersion(), propsName);
if (properties == null || properties.isEmpty()) {
logger.warn("Project " + project.getId() + " version " + project.getVersion()
+ " property " + propsName + " is empty.");
return null;
}
return properties.get(0).getSecond();
} catch (SQLException e) {
logger.error("Error fetching property " + propsName
+ "Project " + project.getId() + " version " + project.getVersion(), e);
throw new ProjectManagerException("Error fetching property " + propsName
+ "Project " + project.getId() + " version " + project.getVersion(), e);
}
}
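  /**
   * Removes flows, properties and file chunks that belong to versions older
   * than the given one.
   */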
@Override
public void cleanOlderProjectVersion(int projectId, int version)
throws ProjectManagerException {
Connection connection = getConnection();
try {
cleanOlderProjectVersionFlows(connection, projectId, version);
cleanOlderProjectVersionProperties(connection, projectId, version);
cleanOlderProjectFiles(connection, projectId, version);
cleanOlderProjectVersion(connection, projectId, version);
} finally {
DbUtils.closeQuietly(connection);
}
}
private void cleanOlderProjectVersionFlows(Connection connection,
int projectId, int version) throws ProjectManagerException {
final String DELETE_FLOW =
"DELETE FROM project_flows WHERE project_id=? AND version<?";
QueryRunner runner = new QueryRunner();
try {
runner.update(connection, DELETE_FLOW, projectId, version);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException("Error deleting project version flows "
+ projectId + ":" + version, e);
}
}
private void cleanOlderProjectVersionProperties(Connection connection,
int projectId, int version) throws ProjectManagerException {
final String DELETE_PROPERTIES =
"DELETE FROM project_properties WHERE project_id=? AND version<?";
QueryRunner runner = new QueryRunner();
try {
runner.update(connection, DELETE_PROPERTIES, projectId, version);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException(
"Error deleting project version properties " + projectId + ":"
+ version, e);
}
}
private void cleanOlderProjectFiles(Connection connection, int projectId,
int version) throws ProjectManagerException {
final String DELETE_PROJECT_FILES =
"DELETE FROM project_files WHERE project_id=? AND version<?";
QueryRunner runner = new QueryRunner();
try {
runner.update(connection, DELETE_PROJECT_FILES, projectId, version);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException("Error deleting project version files "
+ projectId + ":" + version, e);
}
}
private void cleanOlderProjectVersion(Connection connection, int projectId,
int version) throws ProjectManagerException {
final String UPDATE_PROJECT_VERSIONS =
"UPDATE project_versions SET num_chunks=0 WHERE project_id=? AND version<?";
QueryRunner runner = new QueryRunner();
try {
runner.update(connection, UPDATE_PROJECT_VERSIONS, projectId, version);
connection.commit();
} catch (SQLException e) {
throw new ProjectManagerException(
"Error updating project version chunksize " + projectId + ":"
+ version, e);
}
}
@Override
public Map<String, Props> fetchProjectProperties(int projectId, int version)
throws ProjectManagerException {
QueryRunner runner = createQueryRunner();
ProjectPropertiesResultsHandler handler =
new ProjectPropertiesResultsHandler();
try {
List<Pair<String, Props>> properties =
runner.query(
ProjectPropertiesResultsHandler.SELECT_PROJECT_PROPERTIES,
handler, projectId, version);
if (properties == null || properties.isEmpty()) {
return null;
}
HashMap<String, Props> props = new HashMap<String, Props>();
for (Pair<String, Props> pair : properties) {
props.put(pair.getFirst(), pair.getSecond());
}
return props;
} catch (SQLException e) {
logger.error("Error fetching properties, project id" + projectId + " version " + version, e);
throw new ProjectManagerException("Error fetching properties", e);
}
}
private static class ProjectResultHandler implements
ResultSetHandler<List<Project>> {
private static String SELECT_PROJECT_BY_NAME =
"SELECT id, name, active, modified_time, create_time, version, last_modified_by, description, enc_type, settings_blob FROM projects WHERE name=?";
private static String SELECT_PROJECT_BY_ID =
"SELECT id, name, active, modified_time, create_time, version, last_modified_by, description, enc_type, settings_blob FROM projects WHERE id=?";
private static String SELECT_ALL_ACTIVE_PROJECTS =
"SELECT id, name, active, modified_time, create_time, version, last_modified_by, description, enc_type, settings_blob FROM projects WHERE active=true";
private static String SELECT_ACTIVE_PROJECT_BY_NAME =
"SELECT id, name, active, modified_time, create_time, version, last_modified_by, description, enc_type, settings_blob FROM projects WHERE name=? AND active=true";
@Override
public List<Project> handle(ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.<Project> emptyList();
}
ArrayList<Project> projects = new ArrayList<Project>();
do {
int id = rs.getInt(1);
String name = rs.getString(2);
boolean active = rs.getBoolean(3);
long modifiedTime = rs.getLong(4);
long createTime = rs.getLong(5);
int version = rs.getInt(6);
String lastModifiedBy = rs.getString(7);
String description = rs.getString(8);
int encodingType = rs.getInt(9);
byte[] data = rs.getBytes(10);
Project project;
if (data != null) {
EncodingType encType = EncodingType.fromInteger(encodingType);
Object blobObj;
try {
// Convoluted way to inflate strings. Should find common package or
// helper function.
if (encType == EncodingType.GZIP) {
// Decompress the sucker.
String jsonString = GZIPUtils.unGzipString(data, "UTF-8");
blobObj = JSONUtils.parseJSONFromString(jsonString);
} else {
String jsonString = new String(data, "UTF-8");
blobObj = JSONUtils.parseJSONFromString(jsonString);
}
project = Project.projectFromObject(blobObj);
} catch (IOException e) {
throw new SQLException("Failed to get project.", e);
}
} else {
project = new Project(id, name);
}
// update the fields as they may have changed
project.setActive(active);
project.setLastModifiedTimestamp(modifiedTime);
project.setCreateTimestamp(createTime);
project.setVersion(version);
project.setLastModifiedUser(lastModifiedBy);
project.setDescription(description);
projects.add(project);
} while (rs.next());
return projects;
}
}
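  // Editor's illustrative sketch (not part of the original file): the inflate
  // logic that the "convoluted way to inflate strings" comments flag is
  // duplicated in the handlers above and below; a shared helper could look
  // roughly like this. The name inflateJsonBlob is hypothetical.
  private static Object inflateJsonBlob(byte[] data, EncodingType encType)
      throws IOException {
    String jsonString =
        (encType == EncodingType.GZIP) ? GZIPUtils.unGzipString(data, "UTF-8")
            : new String(data, "UTF-8");
    return JSONUtils.parseJSONFromString(jsonString);
  }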
private static class ProjectPermissionsResultHandler implements
ResultSetHandler<List<Triple<String, Boolean, Permission>>> {
private static String SELECT_PROJECT_PERMISSION =
"SELECT project_id, modified_time, name, permissions, isGroup FROM project_permissions WHERE project_id=?";
@Override
public List<Triple<String, Boolean, Permission>> handle(ResultSet rs)
throws SQLException {
if (!rs.next()) {
return Collections.<Triple<String, Boolean, Permission>> emptyList();
}
ArrayList<Triple<String, Boolean, Permission>> permissions =
new ArrayList<Triple<String, Boolean, Permission>>();
do {
String username = rs.getString(3);
int permissionFlag = rs.getInt(4);
boolean val = rs.getBoolean(5);
Permission perm = new Permission(permissionFlag);
permissions.add(new Triple<String, Boolean, Permission>(username, val,
perm));
} while (rs.next());
return permissions;
}
}
private static class ProjectFlowsResultHandler implements
ResultSetHandler<List<Flow>> {
private static String SELECT_PROJECT_FLOW =
"SELECT project_id, version, flow_id, modified_time, encoding_type, json FROM project_flows WHERE project_id=? AND version=? AND flow_id=?";
private static String SELECT_ALL_PROJECT_FLOWS =
"SELECT project_id, version, flow_id, modified_time, encoding_type, json FROM project_flows WHERE project_id=? AND version=?";
@Override
public List<Flow> handle(ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.<Flow> emptyList();
}
ArrayList<Flow> flows = new ArrayList<Flow>();
do {
String flowId = rs.getString(3);
int encodingType = rs.getInt(5);
byte[] dataBytes = rs.getBytes(6);
if (dataBytes == null) {
continue;
}
EncodingType encType = EncodingType.fromInteger(encodingType);
Object flowObj = null;
try {
// Convoluted way to inflate strings. Should find common package or
// helper function.
if (encType == EncodingType.GZIP) {
// Decompress the sucker.
String jsonString = GZIPUtils.unGzipString(dataBytes, "UTF-8");
flowObj = JSONUtils.parseJSONFromString(jsonString);
} else {
String jsonString = new String(dataBytes, "UTF-8");
flowObj = JSONUtils.parseJSONFromString(jsonString);
}
Flow flow = Flow.flowFromObject(flowObj);
flows.add(flow);
} catch (IOException e) {
throw new SQLException("Error retrieving flow data " + flowId, e);
}
} while (rs.next());
return flows;
}
}
private static class ProjectPropertiesResultsHandler implements
ResultSetHandler<List<Pair<String, Props>>> {
private static String SELECT_PROJECT_PROPERTY =
"SELECT project_id, version, name, modified_time, encoding_type, property FROM project_properties WHERE project_id=? AND version=? AND name=?";
private static String SELECT_PROJECT_PROPERTIES =
"SELECT project_id, version, name, modified_time, encoding_type, property FROM project_properties WHERE project_id=? AND version=?";
@Override
public List<Pair<String, Props>> handle(ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.<Pair<String, Props>> emptyList();
}
List<Pair<String, Props>> properties =
new ArrayList<Pair<String, Props>>();
do {
String name = rs.getString(3);
        int encodingType = rs.getInt(5);
        byte[] dataBytes = rs.getBytes(6);
        EncodingType encType = EncodingType.fromInteger(encodingType);
String propertyString = null;
try {
if (encType == EncodingType.GZIP) {
// Decompress the sucker.
propertyString = GZIPUtils.unGzipString(dataBytes, "UTF-8");
} else {
propertyString = new String(dataBytes, "UTF-8");
}
Props props = PropsUtils.fromJSONString(propertyString);
props.setSource(name);
properties.add(new Pair<String, Props>(name, props));
} catch (IOException e) {
throw new SQLException(e);
}
} while (rs.next());
return properties;
}
}
private static class ProjectLogsResultHandler implements
ResultSetHandler<List<ProjectLogEvent>> {
private static String SELECT_PROJECT_EVENTS_ORDER =
"SELECT project_id, event_type, event_time, username, message FROM project_events WHERE project_id=? ORDER BY event_time DESC LIMIT ? OFFSET ?";
@Override
public List<ProjectLogEvent> handle(ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.<ProjectLogEvent> emptyList();
}
ArrayList<ProjectLogEvent> events = new ArrayList<ProjectLogEvent>();
do {
int projectId = rs.getInt(1);
int eventType = rs.getInt(2);
long eventTime = rs.getLong(3);
String username = rs.getString(4);
String message = rs.getString(5);
ProjectLogEvent event =
new ProjectLogEvent(projectId, EventType.fromInteger(eventType),
eventTime, username, message);
events.add(event);
} while (rs.next());
return events;
}
}
private static class ProjectFileChunkResultHandler implements
ResultSetHandler<List<byte[]>> {
private static String SELECT_PROJECT_CHUNKS_FILE =
"SELECT project_id, version, chunk, size, file FROM project_files WHERE project_id=? AND version=? AND chunk >= ? AND chunk < ? ORDER BY chunk ASC";
@Override
public List<byte[]> handle(ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.<byte[]> emptyList();
}
ArrayList<byte[]> data = new ArrayList<byte[]>();
do {
byte[] bytes = rs.getBytes(5);
data.add(bytes);
} while (rs.next());
return data;
}
}
private static class ProjectVersionResultHandler implements
ResultSetHandler<List<ProjectFileHandler>> {
private static String SELECT_PROJECT_VERSION =
"SELECT project_id, version, upload_time, uploader, file_type, file_name, md5, num_chunks FROM project_versions WHERE project_id=? AND version=?";
@Override
public List<ProjectFileHandler> handle(ResultSet rs) throws SQLException {
if (!rs.next()) {
return null;
}
List<ProjectFileHandler> handlers = new ArrayList<ProjectFileHandler>();
do {
int projectId = rs.getInt(1);
int version = rs.getInt(2);
long uploadTime = rs.getLong(3);
String uploader = rs.getString(4);
String fileType = rs.getString(5);
String fileName = rs.getString(6);
byte[] md5 = rs.getBytes(7);
int numChunks = rs.getInt(8);
ProjectFileHandler handler =
new ProjectFileHandler(projectId, version, uploadTime, uploader,
fileType, fileName, numChunks, md5);
handlers.add(handler);
} while (rs.next());
return handlers;
}
}
private static class IntHander implements ResultSetHandler<Integer> {
private static String SELECT_LATEST_VERSION =
"SELECT MAX(version) FROM project_versions WHERE project_id=?";
@Override
public Integer handle(ResultSet rs) throws SQLException {
if (!rs.next()) {
return 0;
}
return rs.getInt(1);
}
}
private Connection getConnection() throws ProjectManagerException {
Connection connection = null;
try {
connection = super.getDBConnection(false);
} catch (Exception e) {
DbUtils.closeQuietly(connection);
throw new ProjectManagerException("Error getting DB connection.", e);
}
return connection;
}
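  // Hypothetical sketch (not in the original file): if the cost of acquiring
  // a DB connection ever needs to be profiled, the method above could be
  // wrapped like this. getConnectionTimed is an invented name.
  private Connection getConnectionTimed() throws ProjectManagerException {
    long start = System.currentTimeMillis();
    Connection connection = getConnection();
    logger.info("getConnection() took "
        + (System.currentTimeMillis() - start) + " ms");
    return connection;
  }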
}
| 1 | 12,343 | do we also need to profile the time to getConnection()? | azkaban-azkaban | java |
@@ -34,6 +34,12 @@ def flow_to_json(flow: mitmproxy.flow.Flow) -> dict:
"type": flow.type,
"modified": flow.modified(),
}
+ # .alpn_proto_negotiated is bytes, we need to decode that.
+ for conn in "client_conn", "server_conn":
+ if f[conn]["alpn_proto_negotiated"] is None:
+ continue
+ f[conn]["alpn_proto_negotiated"] = \
+ f[conn]["alpn_proto_negotiated"].decode(errors="backslashreplace")
if flow.error:
f["error"] = flow.error.get_state()
| 1 | import hashlib
import json
import logging
import os.path
import re
from io import BytesIO
import mitmproxy.addons.view
import mitmproxy.flow
import tornado.escape
import tornado.web
import tornado.websocket
from mitmproxy import contentviews
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy import io
from mitmproxy import log
from mitmproxy import version
def flow_to_json(flow: mitmproxy.flow.Flow) -> dict:
"""
Remove flow message content and cert to save transmission space.
Args:
flow: The original flow.
"""
f = {
"id": flow.id,
"intercepted": flow.intercepted,
"client_conn": flow.client_conn.get_state(),
"server_conn": flow.server_conn.get_state(),
"type": flow.type,
"modified": flow.modified(),
}
if flow.error:
f["error"] = flow.error.get_state()
if isinstance(flow, http.HTTPFlow):
if flow.request:
f["request"] = {
"method": flow.request.method,
"scheme": flow.request.scheme,
"host": flow.request.host,
"port": flow.request.port,
"path": flow.request.path,
"http_version": flow.request.http_version,
"headers": tuple(flow.request.headers.items(True)),
"contentLength": len(
flow.request.raw_content) if flow.request.raw_content is not None else None,
"contentHash": hashlib.sha256(
flow.request.raw_content).hexdigest() if flow.request.raw_content is not None else None,
"timestamp_start": flow.request.timestamp_start,
"timestamp_end": flow.request.timestamp_end,
"is_replay": flow.request.is_replay,
}
if flow.response:
f["response"] = {
"http_version": flow.response.http_version,
"status_code": flow.response.status_code,
"reason": flow.response.reason,
"headers": tuple(flow.response.headers.items(True)),
"contentLength": len(
flow.response.raw_content) if flow.response.raw_content is not None else None,
"contentHash": hashlib.sha256(
flow.response.raw_content).hexdigest() if flow.response.raw_content is not None else None,
"timestamp_start": flow.response.timestamp_start,
"timestamp_end": flow.response.timestamp_end,
"is_replay": flow.response.is_replay,
}
f.get("server_conn", {}).pop("cert", None)
return f
def logentry_to_json(e: log.LogEntry) -> dict:
return {
"id": id(e), # we just need some kind of id.
"message": e.msg,
"level": e.level
}
class APIError(tornado.web.HTTPError):
pass
class RequestHandler(tornado.web.RequestHandler):
def write(self, chunk):
# Writing arrays on the top level is ok nowadays.
# http://flask.pocoo.org/docs/0.11/security/#json-security
if isinstance(chunk, list):
chunk = tornado.escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
super(RequestHandler, self).write(chunk)
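    # Editor's note (illustrative, not in the original class): the wrapping of
    # top-level arrays that the linked article discusses guarded against JSON
    # array hijacking via a redefined Array constructor; on ES5+ engines the
    # plain encoding above is considered safe, e.g.:
    #   tornado.escape.json_encode(["a", "b"])  # -> '["a", "b"]'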
def set_default_headers(self):
super().set_default_headers()
self.set_header("Server", version.MITMPROXY)
self.set_header("X-Frame-Options", "DENY")
self.add_header("X-XSS-Protection", "1; mode=block")
self.add_header("X-Content-Type-Options", "nosniff")
self.add_header(
"Content-Security-Policy",
"default-src 'self'; "
"connect-src 'self' ws://* ; "
"style-src 'self' 'unsafe-inline'"
)
@property
def json(self):
if not self.request.headers.get("Content-Type", "").startswith("application/json"):
raise APIError(400, "Invalid Content-Type, expected application/json.")
try:
return json.loads(self.request.body.decode())
except Exception as e:
raise APIError(400, "Malformed JSON: {}".format(str(e)))
@property
def filecontents(self):
"""
Accept either a multipart/form file upload or just take the plain request body.
"""
if self.request.files:
return next(iter(self.request.files.values()))[0].body
else:
return self.request.body
@property
def view(self) -> mitmproxy.addons.view.View:
return self.application.master.view
@property
def master(self) -> "mitmproxy.tools.web.master.WebMaster":
return self.application.master
@property
def flow(self) -> mitmproxy.flow.Flow:
flow_id = str(self.path_kwargs["flow_id"])
# FIXME: Add a facility to addon.view to safely access the store
flow = self.view.get_by_id(flow_id)
if flow:
return flow
else:
raise APIError(404, "Flow not found.")
def write_error(self, status_code: int, **kwargs):
if "exc_info" in kwargs and isinstance(kwargs["exc_info"][1], APIError):
self.finish(kwargs["exc_info"][1].log_message)
else:
super().write_error(status_code, **kwargs)
class IndexHandler(RequestHandler):
def get(self):
token = self.xsrf_token # https://github.com/tornadoweb/tornado/issues/645
assert token
self.render("index.html")
class FilterHelp(RequestHandler):
def get(self):
self.write(dict(
commands=flowfilter.help
))
class WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler):
# raise an error if inherited class doesn't specify its own instance.
connections = None # type: set
def open(self):
self.connections.add(self)
def on_close(self):
self.connections.remove(self)
@classmethod
def broadcast(cls, **kwargs):
message = json.dumps(kwargs, ensure_ascii=False)
for conn in cls.connections:
try:
conn.write_message(message)
except Exception: # pragma: no cover
logging.error("Error sending message", exc_info=True)
class ClientConnection(WebSocketEventBroadcaster):
connections = set() # type: set
class Flows(RequestHandler):
def get(self):
self.write([flow_to_json(f) for f in self.view])
class DumpFlows(RequestHandler):
def get(self):
self.set_header("Content-Disposition", "attachment; filename=flows")
self.set_header("Content-Type", "application/octet-stream")
bio = BytesIO()
fw = io.FlowWriter(bio)
for f in self.view:
fw.add(f)
self.write(bio.getvalue())
bio.close()
def post(self):
self.view.clear()
bio = BytesIO(self.filecontents)
self.master.load_flows(io.FlowReader(bio))
bio.close()
class ClearAll(RequestHandler):
def post(self):
self.view.clear()
self.master.events.clear()
class ResumeFlows(RequestHandler):
def post(self):
for f in self.view:
f.resume()
self.view.update(f)
class KillFlows(RequestHandler):
def post(self):
for f in self.view:
if f.killable:
f.kill()
self.view.update(f)
class ResumeFlow(RequestHandler):
def post(self, flow_id):
self.flow.resume()
self.view.update(self.flow)
class KillFlow(RequestHandler):
def post(self, flow_id):
if self.flow.killable:
self.flow.kill()
self.view.update(self.flow)
class FlowHandler(RequestHandler):
def delete(self, flow_id):
if self.flow.killable:
self.flow.kill()
self.view.remove(self.flow)
def put(self, flow_id):
flow = self.flow
flow.backup()
try:
for a, b in self.json.items():
if a == "request" and hasattr(flow, "request"):
request = flow.request
for k, v in b.items():
if k in ["method", "scheme", "host", "path", "http_version"]:
setattr(request, k, str(v))
elif k == "port":
request.port = int(v)
elif k == "headers":
request.headers.clear()
for header in v:
request.headers.add(*header)
elif k == "content":
request.text = v
else:
raise APIError(400, "Unknown update request.{}: {}".format(k, v))
elif a == "response" and hasattr(flow, "response"):
response = flow.response
for k, v in b.items():
if k in ["msg", "http_version"]:
setattr(response, k, str(v))
elif k == "code":
response.status_code = int(v)
elif k == "headers":
response.headers.clear()
for header in v:
response.headers.add(*header)
elif k == "content":
response.text = v
else:
raise APIError(400, "Unknown update response.{}: {}".format(k, v))
else:
raise APIError(400, "Unknown update {}: {}".format(a, b))
except APIError:
flow.revert()
raise
self.view.update(flow)
class DuplicateFlow(RequestHandler):
def post(self, flow_id):
f = self.flow.copy()
self.view.add(f)
self.write(f.id)
class RevertFlow(RequestHandler):
def post(self, flow_id):
if self.flow.modified():
self.flow.revert()
self.view.update(self.flow)
class ReplayFlow(RequestHandler):
def post(self, flow_id):
self.flow.backup()
self.flow.response = None
self.view.update(self.flow)
try:
self.master.replay_request(self.flow)
except exceptions.ReplayException as e:
raise APIError(400, str(e))
class FlowContent(RequestHandler):
def post(self, flow_id, message):
self.flow.backup()
message = getattr(self.flow, message)
message.content = self.filecontents
self.view.update(self.flow)
def get(self, flow_id, message):
message = getattr(self.flow, message)
if not message.raw_content:
raise APIError(400, "No content.")
content_encoding = message.headers.get("Content-Encoding", None)
if content_encoding:
content_encoding = re.sub(r"[^\w]", "", content_encoding)
self.set_header("Content-Encoding", content_encoding)
original_cd = message.headers.get("Content-Disposition", None)
filename = None
if original_cd:
            filename = re.search(r'filename=([-\w" .()]+)', original_cd)
if filename:
filename = filename.group(1)
if not filename:
filename = self.flow.request.path.split("?")[0].split("/")[-1]
filename = re.sub(r'[^-\w" .()]', "", filename)
cd = "attachment; filename={}".format(filename)
self.set_header("Content-Disposition", cd)
self.set_header("Content-Type", "application/text")
self.set_header("X-Content-Type-Options", "nosniff")
self.set_header("X-Frame-Options", "DENY")
self.write(message.raw_content)
class FlowContentView(RequestHandler):
def get(self, flow_id, message, content_view):
message = getattr(self.flow, message)
description, lines, error = contentviews.get_message_content_view(
content_view.replace('_', ' '), message
)
# if error:
# add event log
self.write(dict(
lines=list(lines),
description=description
))
class Events(RequestHandler):
def get(self):
self.write([logentry_to_json(e) for e in self.master.events.data])
class Settings(RequestHandler):
def get(self):
self.write(dict(
version=version.VERSION,
mode=str(self.master.options.mode),
intercept=self.master.options.intercept,
showhost=self.master.options.showhost,
no_upstream_cert=self.master.options.no_upstream_cert,
rawtcp=self.master.options.rawtcp,
http2=self.master.options.http2,
websocket=self.master.options.websocket,
anticache=self.master.options.anticache,
anticomp=self.master.options.anticomp,
stickyauth=self.master.options.stickyauth,
stickycookie=self.master.options.stickycookie,
stream=self.master.options.stream_large_bodies,
contentViews=[v.name.replace(' ', '_') for v in contentviews.views],
listen_host=self.master.options.listen_host,
listen_port=self.master.options.listen_port,
))
def put(self):
update = self.json
option_whitelist = {
"intercept", "showhost", "no_upstream_cert",
"rawtcp", "http2", "websocket", "anticache", "anticomp",
"stickycookie", "stickyauth", "stream_large_bodies"
}
for k in update:
if k not in option_whitelist:
raise APIError(400, "Unknown setting {}".format(k))
self.master.options.update(**update)
class Application(tornado.web.Application):
def __init__(self, master, debug):
self.master = master
handlers = [
(r"/", IndexHandler),
(r"/filter-help", FilterHelp),
(r"/updates", ClientConnection),
(r"/events", Events),
(r"/flows", Flows),
(r"/flows/dump", DumpFlows),
(r"/flows/resume", ResumeFlows),
(r"/flows/kill", KillFlows),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)", FlowHandler),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/resume", ResumeFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/kill", KillFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/duplicate", DuplicateFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/replay", ReplayFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/revert", RevertFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content", FlowContent),
(
r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content/(?P<content_view>[0-9a-zA-Z\-\_]+)",
FlowContentView),
(r"/settings", Settings),
(r"/clear", ClearAll),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
cookie_secret=os.urandom(256),
debug=debug,
autoreload=False,
)
super().__init__(handlers, **settings)
| 1 | 12,477 | Should we move the decode part directly to the actual first-use of this? Or how does this affect if the value gets decoded and we need to get the bytes back later? | mitmproxy-mitmproxy | py |
@@ -78,6 +78,16 @@ $config = [
]
]
],
+ 'legacy-holds' => [
+ 'type' => 'Laminas\Router\Http\Literal',
+ 'options' => [
+ 'route' => '/MyResearch/Holds',
+ 'defaults' => [
+ 'controller' => 'Holds',
+ 'action' => 'List',
+ ]
+ ]
+ ],
'legacy-summonrecord' => [
'type' => 'Laminas\Router\Http\Literal',
            'options' => [
| 1 | <?php
namespace VuFind\Module\Config;
$config = [
'router' => [
'routes' => [
'default' => [
'type' => 'Laminas\Router\Http\Segment',
'options' => [
'route' => '/[:controller[/[:action]]]',
'constraints' => [
'controller' => '[a-zA-Z][a-zA-Z0-9_-]*',
'action' => '[a-zA-Z][a-zA-Z0-9_-]*',
],
'defaults' => [
'controller' => 'index',
'action' => 'Home',
],
],
],
'alma-webhook' => [
'type' => 'Laminas\Router\Http\Segment',
'options' => [
'route' => '/Alma/Webhook/[:almaWebhookAction]',
'constraints' => [
'controller' => '[a-zA-Z][a-zA-Z0-9_-]*',
'action' => '[a-zA-Z][a-zA-Z0-9_-]*',
],
'defaults' => [
'controller' => 'Alma',
'action' => 'Webhook',
],
],
],
'content-page' => [
'type' => 'Laminas\Router\Http\Segment',
'options' => [
'route' => '/Content/[:page]',
'constraints' => [
'page' => '[a-zA-Z][a-zA-Z0-9_-]*',
],
'defaults' => [
'controller' => 'Content',
'action' => 'Content',
]
],
],
'shortlink' => [
'type' => 'Laminas\Router\Http\Segment',
'options' => [
'route' => '/short/[:id]',
'constraints' => [
'id' => '[a-zA-Z0-9]+',
],
'defaults' => [
'controller' => 'Shortlink',
'action' => 'redirect',
]
],
],
'legacy-alphabrowse-results' => [
'type' => 'Laminas\Router\Http\Literal',
'options' => [
'route' => '/AlphaBrowse/Results',
'defaults' => [
'controller' => 'Alphabrowse',
'action' => 'Home',
]
]
],
'legacy-bookcover' => [
'type' => 'Laminas\Router\Http\Literal',
'options' => [
'route' => '/bookcover.php',
'defaults' => [
'controller' => 'Cover',
'action' => 'Show',
]
]
],
'legacy-summonrecord' => [
'type' => 'Laminas\Router\Http\Literal',
'options' => [
'route' => '/Summon/Record',
'defaults' => [
'controller' => 'SummonRecord',
'action' => 'Home',
]
]
],
'legacy-worldcatrecord' => [
'type' => 'Laminas\Router\Http\Literal',
'options' => [
'route' => '/WorldCat/Record',
'defaults' => [
'controller' => 'WorldcatRecord',
'action' => 'Home',
]
]
],
'soap-shibboleth-logout-notification-handler' => [
'type' => 'Laminas\Router\Http\Literal',
'options' => [
'route' => '/soap/shiblogout',
'defaults' => [
'controller' => 'ShibbolethLogoutNotification',
'action' => 'index'
]
],
'child_routes' => [
'get' => [
'type' => 'method',
'options' => [
'verb' => 'get',
'defaults' => [
'action' => 'get'
],
],
],
'post' => [
'type' => 'method',
'options' => [
'verb' => 'post',
'defaults' => [
'action' => 'post'
]
]
]
]
]
],
],
'controllers' => [
'factories' => [
'VuFind\Controller\AjaxController' => 'VuFind\Controller\AjaxControllerFactory',
'VuFind\Controller\AlmaController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\AlphabrowseController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\AuthorController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\AuthorityController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\BrowseController' => 'VuFind\Controller\AbstractBaseWithConfigFactory',
'VuFind\Controller\BrowZineController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\CartController' => 'VuFind\Controller\CartControllerFactory',
'VuFind\Controller\ChannelsController' => 'VuFind\Controller\ChannelsControllerFactory',
'VuFind\Controller\CollectionController' => 'VuFind\Controller\AbstractBaseWithConfigFactory',
'VuFind\Controller\CollectionsController' => 'VuFind\Controller\AbstractBaseWithConfigFactory',
'VuFind\Controller\CombinedController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\ConfirmController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\ContentController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\CoverController' => 'VuFind\Controller\CoverControllerFactory',
'VuFind\Controller\EdsController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\EdsrecordController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\EITController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\EITrecordController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\ErrorController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\ExternalAuthController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\FeedbackController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\Search2Controller' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\Search2recordController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\Search2collectionController' => 'VuFind\Controller\AbstractBaseWithConfigFactory',
'VuFind\Controller\HelpController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\HierarchyController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\IndexController' => 'VuFind\Controller\IndexControllerFactory',
'VuFind\Controller\InstallController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\LibGuidesController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\LibraryCardsController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\MissingrecordController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\MyResearchController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\OaiController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\OverdriveController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\Pazpar2Controller' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\PrimoController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\PrimorecordController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\QRCodeController' => 'VuFind\Controller\QRCodeControllerFactory',
'VuFind\Controller\RecordController' => 'VuFind\Controller\AbstractBaseWithConfigFactory',
'VuFind\Controller\RecordsController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\RelaisController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\SearchController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\ShibbolethLogoutNotificationController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\ShortlinkController' => 'VuFind\Controller\AbstractBaseWithConfigFactory',
'VuFind\Controller\SummonController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\SummonrecordController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\TagController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\UpgradeController' => 'VuFind\Controller\UpgradeControllerFactory',
'VuFind\Controller\WebController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\WorldcatController' => 'VuFind\Controller\AbstractBaseFactory',
'VuFind\Controller\WorldcatrecordController' => 'VuFind\Controller\AbstractBaseFactory',
],
'initializers' => [
'VuFind\ServiceManager\ServiceInitializer',
],
'aliases' => [
'AJAX' => 'VuFind\Controller\AjaxController',
'ajax' => 'VuFind\Controller\AjaxController',
'Alma' => 'VuFind\Controller\AlmaController',
'alma' => 'VuFind\Controller\AlmaController',
'Alphabrowse' => 'VuFind\Controller\AlphabrowseController',
'alphabrowse' => 'VuFind\Controller\AlphabrowseController',
'Author' => 'VuFind\Controller\AuthorController',
'author' => 'VuFind\Controller\AuthorController',
'Authority' => 'VuFind\Controller\AuthorityController',
'authority' => 'VuFind\Controller\AuthorityController',
'Browse' => 'VuFind\Controller\BrowseController',
'browse' => 'VuFind\Controller\BrowseController',
'BrowZine' => 'VuFind\Controller\BrowZineController',
'browzine' => 'VuFind\Controller\BrowZineController',
'Cart' => 'VuFind\Controller\CartController',
'cart' => 'VuFind\Controller\CartController',
'Channels' => 'VuFind\Controller\ChannelsController',
'channels' => 'VuFind\Controller\ChannelsController',
'Collection' => 'VuFind\Controller\CollectionController',
'collection' => 'VuFind\Controller\CollectionController',
'Collections' => 'VuFind\Controller\CollectionsController',
'collections' => 'VuFind\Controller\CollectionsController',
'Combined' => 'VuFind\Controller\CombinedController',
'combined' => 'VuFind\Controller\CombinedController',
'Confirm' => 'VuFind\Controller\ConfirmController',
'confirm' => 'VuFind\Controller\ConfirmController',
'Content' => 'VuFind\Controller\ContentController',
'content' => 'VuFind\Controller\ContentController',
'Cover' => 'VuFind\Controller\CoverController',
'cover' => 'VuFind\Controller\CoverController',
'EDS' => 'VuFind\Controller\EdsController',
'eds' => 'VuFind\Controller\EdsController',
'EdsRecord' => 'VuFind\Controller\EdsrecordController',
'edsrecord' => 'VuFind\Controller\EdsrecordController',
'EIT' => 'VuFind\Controller\EITController',
'eit' => 'VuFind\Controller\EITController',
'EITRecord' => 'VuFind\Controller\EITrecordController',
'eitrecord' => 'VuFind\Controller\EITrecordController',
'Error' => 'VuFind\Controller\ErrorController',
'error' => 'VuFind\Controller\ErrorController',
'ExternalAuth' => 'VuFind\Controller\ExternalAuthController',
'externalauth' => 'VuFind\Controller\ExternalAuthController',
'Feedback' => 'VuFind\Controller\FeedbackController',
'feedback' => 'VuFind\Controller\FeedbackController',
'Search2' => 'VuFind\Controller\Search2Controller',
'search2' => 'VuFind\Controller\Search2Controller',
'Search2Collection' => 'VuFind\Controller\Search2collectionController',
'search2collection' => 'VuFind\Controller\Search2collectionController',
'Search2Record' => 'VuFind\Controller\Search2recordController',
'search2record' => 'VuFind\Controller\Search2recordController',
'Help' => 'VuFind\Controller\HelpController',
'help' => 'VuFind\Controller\HelpController',
'Hierarchy' => 'VuFind\Controller\HierarchyController',
'hierarchy' => 'VuFind\Controller\HierarchyController',
'Index' => 'VuFind\Controller\IndexController',
'index' => 'VuFind\Controller\IndexController',
'Install' => 'VuFind\Controller\InstallController',
'install' => 'VuFind\Controller\InstallController',
'LibGuides' => 'VuFind\Controller\LibGuidesController',
'libguides' => 'VuFind\Controller\LibGuidesController',
'LibraryCards' => 'VuFind\Controller\LibraryCardsController',
'librarycards' => 'VuFind\Controller\LibraryCardsController',
'MissingRecord' => 'VuFind\Controller\MissingrecordController',
'missingrecord' => 'VuFind\Controller\MissingrecordController',
'MyResearch' => 'VuFind\Controller\MyResearchController',
'myresearch' => 'VuFind\Controller\MyResearchController',
'OAI' => 'VuFind\Controller\OaiController',
'oai' => 'VuFind\Controller\OaiController',
'Overdrive' => 'VuFind\Controller\OverdriveController',
'overdrive' => 'VuFind\Controller\OverdriveController',
'Pazpar2' => 'VuFind\Controller\Pazpar2Controller',
'pazpar2' => 'VuFind\Controller\Pazpar2Controller',
'Primo' => 'VuFind\Controller\PrimoController',
'primo' => 'VuFind\Controller\PrimoController',
'PrimoRecord' => 'VuFind\Controller\PrimorecordController',
'primorecord' => 'VuFind\Controller\PrimorecordController',
'QRCode' => 'VuFind\Controller\QRCodeController',
'qrcode' => 'VuFind\Controller\QRCodeController',
'Record' => 'VuFind\Controller\RecordController',
'record' => 'VuFind\Controller\RecordController',
'Records' => 'VuFind\Controller\RecordsController',
'records' => 'VuFind\Controller\RecordsController',
'Relais' => 'VuFind\Controller\RelaisController',
'relais' => 'VuFind\Controller\RelaisController',
'Search' => 'VuFind\Controller\SearchController',
'search' => 'VuFind\Controller\SearchController',
'ShibbolethLogoutNotification' => 'VuFind\Controller\ShibbolethLogoutNotificationController',
'shibbolethlogoutnotification' => 'VuFind\Controller\ShibbolethLogoutNotificationController',
'Shortlink' => 'VuFind\Controller\ShortlinkController',
'shortlink' => 'VuFind\Controller\ShortlinkController',
'Summon' => 'VuFind\Controller\SummonController',
'summon' => 'VuFind\Controller\SummonController',
'SummonRecord' => 'VuFind\Controller\SummonrecordController',
'summonrecord' => 'VuFind\Controller\SummonrecordController',
'Tag' => 'VuFind\Controller\TagController',
'tag' => 'VuFind\Controller\TagController',
'Upgrade' => 'VuFind\Controller\UpgradeController',
'upgrade' => 'VuFind\Controller\UpgradeController',
'Web' => 'VuFind\Controller\WebController',
'web' => 'VuFind\Controller\WebController',
'Worldcat' => 'VuFind\Controller\WorldcatController',
'worldcat' => 'VuFind\Controller\WorldcatController',
'WorldcatRecord' => 'VuFind\Controller\WorldcatrecordController',
'worldcatrecord' => 'VuFind\Controller\WorldcatrecordController',
],
],
'controller_plugins' => [
'factories' => [
'VuFind\Controller\Plugin\Captcha' => 'VuFind\Controller\Plugin\CaptchaFactory',
'VuFind\Controller\Plugin\DbUpgrade' => 'Laminas\ServiceManager\Factory\InvokableFactory',
'VuFind\Controller\Plugin\Favorites' => 'VuFind\Controller\Plugin\FavoritesFactory',
'VuFind\Controller\Plugin\Followup' => 'VuFind\Controller\Plugin\FollowupFactory',
'VuFind\Controller\Plugin\Holds' => 'VuFind\Controller\Plugin\AbstractRequestBaseFactory',
'VuFind\Controller\Plugin\ILLRequests' => 'VuFind\Controller\Plugin\AbstractRequestBaseFactory',
'VuFind\Controller\Plugin\NewItems' => 'VuFind\Controller\Plugin\NewItemsFactory',
'VuFind\Controller\Plugin\Permission' => 'VuFind\Controller\Plugin\PermissionFactory',
'VuFind\Controller\Plugin\Renewals' => 'Laminas\ServiceManager\Factory\InvokableFactory',
'VuFind\Controller\Plugin\Reserves' => 'VuFind\Controller\Plugin\ReservesFactory',
'VuFind\Controller\Plugin\ResultScroller' => 'VuFind\Controller\Plugin\ResultScrollerFactory',
'VuFind\Controller\Plugin\StorageRetrievalRequests' => 'VuFind\Controller\Plugin\AbstractRequestBaseFactory',
'Laminas\Mvc\Plugin\FlashMessenger\FlashMessenger' => 'VuFind\Controller\Plugin\FlashMessengerFactory',
],
'initializers' => [
'VuFind\ServiceManager\ServiceInitializer',
],
'aliases' => [
'captcha' => 'VuFind\Controller\Plugin\Captcha',
'dbUpgrade' => 'VuFind\Controller\Plugin\DbUpgrade',
'favorites' => 'VuFind\Controller\Plugin\Favorites',
'flashMessenger' => 'Laminas\Mvc\Plugin\FlashMessenger\FlashMessenger',
'followup' => 'VuFind\Controller\Plugin\Followup',
'holds' => 'VuFind\Controller\Plugin\Holds',
'ILLRequests' => 'VuFind\Controller\Plugin\ILLRequests',
'newItems' => 'VuFind\Controller\Plugin\NewItems',
'permission' => 'VuFind\Controller\Plugin\Permission',
'renewals' => 'VuFind\Controller\Plugin\Renewals',
'reserves' => 'VuFind\Controller\Plugin\Reserves',
'resultScroller' => 'VuFind\Controller\Plugin\ResultScroller',
'storageRetrievalRequests' => 'VuFind\Controller\Plugin\StorageRetrievalRequests',
],
],
'service_manager' => [
'allow_override' => true,
'factories' => [
'League\CommonMark\MarkdownConverterInterface' => 'VuFind\Service\MarkdownFactory',
'ProxyManager\Configuration' => 'VuFind\Service\ProxyConfigFactory',
'VuFind\AjaxHandler\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Auth\EmailAuthenticator' => 'VuFind\Auth\EmailAuthenticatorFactory',
'VuFind\Auth\ILSAuthenticator' => 'VuFind\Auth\ILSAuthenticatorFactory',
'VuFind\Auth\Manager' => 'VuFind\Auth\ManagerFactory',
'VuFind\Auth\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Autocomplete\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Autocomplete\Suggester' => 'VuFind\Autocomplete\SuggesterFactory',
'VuFind\Cache\Manager' => 'VuFind\Cache\ManagerFactory',
'VuFind\Captcha\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Cart' => 'VuFind\CartFactory',
'VuFind\ChannelProvider\ChannelLoader' => 'VuFind\ChannelProvider\ChannelLoaderFactory',
'VuFind\ChannelProvider\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Config\AccountCapabilities' => 'VuFind\Config\AccountCapabilitiesFactory',
'VuFind\Config\PluginManager' => 'VuFind\Config\PluginManagerFactory',
'VuFind\Config\SearchSpecsReader' => 'VuFind\Config\YamlReaderFactory',
'VuFind\Config\YamlReader' => 'VuFind\Config\YamlReaderFactory',
'VuFind\Connection\Relais' => 'VuFind\Connection\RelaisFactory',
'VuFind\Connection\WorldCatUtils' => 'VuFind\Connection\WorldCatUtilsFactory',
'VuFind\Content\PageLocator' => 'VuFind\Content\PageLocatorFactory',
'VuFind\Content\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Content\AuthorNotes\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Content\Covers\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Content\Excerpts\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Content\ObalkyKnihService' => 'VuFind\Content\ObalkyKnihServiceFactory',
'VuFind\Content\Reviews\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Content\Summaries\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Content\TOC\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\ContentBlock\BlockLoader' => 'VuFind\ContentBlock\BlockLoaderFactory',
'VuFind\ContentBlock\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Cookie\CookieManager' => 'VuFind\Cookie\CookieManagerFactory',
'VuFind\Cover\CachingProxy' => 'VuFind\Cover\CachingProxyFactory',
'VuFind\Cover\Generator' => 'VuFind\Cover\GeneratorFactory',
'VuFind\Cover\Layer\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Cover\Loader' => 'VuFind\Cover\LoaderFactory',
'VuFind\Cover\Router' => 'VuFind\Cover\RouterFactory',
'VuFind\Crypt\HMAC' => 'VuFind\Crypt\HMACFactory',
'VuFind\Date\Converter' => 'VuFind\Service\DateConverterFactory',
'VuFind\Db\AdapterFactory' => 'VuFind\Service\ServiceWithConfigIniFactory',
'VuFind\Db\Row\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Db\Table\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\DigitalContent\OverdriveConnector' => 'VuFind\DigitalContent\OverdriveConnectorFactory',
'VuFind\DoiLinker\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Export' => 'VuFind\ExportFactory',
'VuFind\Favorites\FavoritesService' => 'VuFind\Favorites\FavoritesServiceFactory',
'VuFind\Form\Form' => 'VuFind\Form\FormFactory',
'VuFind\GeoFeatures\BasemapConfig' => 'VuFind\GeoFeatures\AbstractConfigFactory',
'VuFind\GeoFeatures\MapTabConfig' => 'VuFind\GeoFeatures\AbstractConfigFactory',
'VuFind\GeoFeatures\MapSelectionConfig' => 'VuFind\GeoFeatures\AbstractConfigFactory',
'VuFind\Hierarchy\Driver\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Hierarchy\TreeDataFormatter\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Hierarchy\TreeDataSource\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Hierarchy\TreeRenderer\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Http\PhpEnvironment\Request' => 'Laminas\ServiceManager\Factory\InvokableFactory',
'VuFind\I18n\Locale\LocaleSettings' => 'VuFind\Service\ServiceWithConfigIniFactory',
'VuFind\ILS\Connection' => 'VuFind\ILS\ConnectionFactory',
'VuFind\ILS\Driver\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\ILS\Logic\Holds' => 'VuFind\ILS\Logic\LogicFactory',
'VuFind\ILS\Logic\TitleHolds' => 'VuFind\ILS\Logic\LogicFactory',
'VuFind\ILS\HoldSettings' => 'VuFind\ILS\HoldSettingsFactory',
'VuFind\Log\Logger' => 'VuFind\Log\LoggerFactory',
'VuFind\Mailer\Mailer' => 'VuFind\Mailer\Factory',
'VuFind\MetadataVocabulary\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Net\IpAddressUtils' => 'Laminas\ServiceManager\Factory\InvokableFactory',
'VuFind\Net\UserIpReader' => 'VuFind\Net\UserIpReaderFactory',
'VuFind\OAI\Server' => 'VuFind\OAI\ServerFactory',
'VuFind\OAI\Server\Auth' => 'VuFind\OAI\ServerFactory',
'VuFind\QRCode\Loader' => 'VuFind\QRCode\LoaderFactory',
'VuFind\Recommend\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Record\Cache' => 'VuFind\Record\CacheFactory',
'VuFind\Record\FallbackLoader\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Record\Loader' => 'VuFind\Record\LoaderFactory',
'VuFind\Record\Router' => 'VuFind\Service\ServiceWithConfigIniFactory',
'VuFind\RecordDriver\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\RecordTab\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\RecordTab\TabManager' => 'VuFind\RecordTab\TabManagerFactory',
'VuFind\Related\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Resolver\Driver\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Role\PermissionManager' => 'VuFind\Role\PermissionManagerFactory',
'VuFind\Role\PermissionDeniedManager' => 'VuFind\Role\PermissionDeniedManagerFactory',
'VuFind\Search\BackendManager' => 'VuFind\Search\BackendManagerFactory',
'VuFind\Search\History' => 'VuFind\Search\HistoryFactory',
'VuFind\Search\Memory' => 'VuFind\Search\MemoryFactory',
'VuFind\Search\FacetCache\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Search\Factory\UrlQueryHelperFactory' => 'Laminas\ServiceManager\Factory\InvokableFactory',
'VuFind\Search\Options\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Search\Params\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Search\Results\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Search\Solr\HierarchicalFacetHelper' => 'Laminas\ServiceManager\Factory\InvokableFactory',
'VuFind\Search\SearchRunner' => 'VuFind\Search\SearchRunnerFactory',
'VuFind\Search\SearchTabsHelper' => 'VuFind\Search\SearchTabsHelperFactory',
'VuFind\Security\CspHeaderGenerator' => 'VuFind\Security\CspHeaderGeneratorFactory',
'VuFind\Security\NonceGenerator' => 'Laminas\ServiceManager\Factory\InvokableFactory',
'VuFind\Service\ReCaptcha' => 'VuFind\Service\ReCaptchaFactory',
'VuFind\Session\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\Session\Settings' => 'Laminas\ServiceManager\Factory\InvokableFactory',
'VuFind\Sitemap\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\SMS\SMSInterface' => 'VuFind\SMS\Factory',
'VuFind\Solr\Writer' => 'VuFind\Solr\WriterFactory',
'VuFind\Tags' => 'VuFind\TagsFactory',
'VuFind\UrlShortener\PluginManager' => 'VuFind\ServiceManager\AbstractPluginManagerFactory',
'VuFind\UrlShortener\UrlShortenerInterface' => 'VuFind\UrlShortener\ServiceFactory',
'VuFind\Validator\Csrf' => 'VuFind\Validator\CsrfFactory',
'VuFindHttp\HttpService' => 'VuFind\Service\HttpServiceFactory',
'VuFindSearch\Service' => 'VuFind\Service\SearchServiceFactory',
'Laminas\Db\Adapter\Adapter' => 'VuFind\Db\AdapterFactory',
'Laminas\Http\PhpEnvironment\RemoteAddress' => 'VuFind\Http\PhpEnvironment\RemoteAddressFactory',
'Laminas\Session\SessionManager' => 'VuFind\Session\ManagerFactory',
],
'delegators' => [
'Laminas\I18n\Translator\TranslatorInterface' => [
'VuFind\I18n\Translator\TranslatorFactory',
],
'SlmLocale\Locale\Detector' => [
'VuFind\I18n\Locale\LocaleDetectorFactory',
],
],
'initializers' => [
'VuFind\ServiceManager\ServiceInitializer',
],
'aliases' => [
'Request' => 'VuFind\Http\PhpEnvironment\Request',
'VuFind\AccountCapabilities' => 'VuFind\Config\AccountCapabilities',
'VuFind\AuthManager' => 'VuFind\Auth\Manager',
'VuFind\AuthPluginManager' => 'VuFind\Auth\PluginManager',
'VuFind\AutocompletePluginManager' => 'VuFind\Autocomplete\PluginManager',
'VuFind\CacheManager' => 'VuFind\Cache\Manager',
'VuFind\ChannelProviderPluginManager' => 'VuFind\ChannelProvider\PluginManager',
'VuFind\Config' => 'VuFind\Config\PluginManager',
'VuFind\ContentPluginManager' => 'VuFind\Content\PluginManager',
'VuFind\ContentAuthorNotesPluginManager' => 'VuFind\Content\AuthorNotes\PluginManager',
'VuFind\ContentCoversPluginManager' => 'VuFind\Content\Covers\PluginManager',
'VuFind\ContentExcerptsPluginManager' => 'VuFind\Content\Excerpts\PluginManager',
'VuFind\ContentReviewsPluginManager' => 'VuFind\Content\Reviews\PluginManager',
'VuFind\ContentSummariesPluginManager' => 'VuFind\Content\Summaries\PluginManager',
'VuFind\ContentTOCPluginManager' => 'VuFind\Content\TOC\PluginManager',
'VuFind\CookieManager' => 'VuFind\Cookie\CookieManager',
'VuFind\DateConverter' => 'VuFind\Date\Converter',
'VuFind\DbAdapter' => 'Laminas\Db\Adapter\Adapter',
'VuFind\DbAdapterFactory' => 'VuFind\Db\AdapterFactory',
'VuFind\DbRowPluginManager' => 'VuFind\Db\Row\PluginManager',
'VuFind\DbTablePluginManager' => 'VuFind\Db\Table\PluginManager',
'VuFind\HierarchicalFacetHelper' => 'VuFind\Search\Solr\HierarchicalFacetHelper',
'VuFind\HierarchyDriverPluginManager' => 'VuFind\Hierarchy\Driver\PluginManager',
'VuFind\HierarchyTreeDataFormatterPluginManager' => 'VuFind\Hierarchy\TreeDataFormatter\PluginManager',
'VuFind\HierarchyTreeDataSourcePluginManager' => 'VuFind\Hierarchy\TreeDataSource\PluginManager',
'VuFind\HierarchyTreeRendererPluginManager' => 'VuFind\Hierarchy\TreeRenderer\PluginManager',
'VuFind\HMAC' => 'VuFind\Crypt\HMAC',
'VuFind\Http' => 'VuFindHttp\HttpService',
'VuFind\ILSAuthenticator' => 'VuFind\Auth\ILSAuthenticator',
'VuFind\ILSConnection' => 'VuFind\ILS\Connection',
'VuFind\ILSDriverPluginManager' => 'VuFind\ILS\Driver\PluginManager',
'VuFind\ILSHoldLogic' => 'VuFind\ILS\Logic\Holds',
'VuFind\ILSHoldSettings' => 'VuFind\ILS\HoldSettings',
'VuFind\ILSTitleHoldLogic' => 'VuFind\ILS\Logic\TitleHolds',
'VuFind\IpAddressUtils' => 'VuFind\Net\IpAddressUtils',
'VuFind\Logger' => 'VuFind\Log\Logger',
'VuFind\Mailer' => 'VuFind\Mailer\Mailer',
'VuFind\ProxyConfig' => 'ProxyManager\Configuration',
'VuFind\Recaptcha' => 'VuFind\Service\ReCaptcha',
'VuFind\RecommendPluginManager' => 'VuFind\Recommend\PluginManager',
'VuFind\RecordCache' => 'VuFind\Record\Cache',
'VuFind\RecordDriverPluginManager' => 'VuFind\RecordDriver\PluginManager',
'VuFind\RecordLoader' => 'VuFind\Record\Loader',
'VuFind\RecordRouter' => 'VuFind\Record\Router',
'VuFind\RecordTabPluginManager' => 'VuFind\RecordTab\PluginManager',
'VuFind\RelatedPluginManager' => 'VuFind\Related\PluginManager',
'VuFind\ResolverDriverPluginManager' => 'VuFind\Resolver\Driver\PluginManager',
'VuFind\Search' => 'VuFindSearch\Service',
'VuFind\SearchOptionsPluginManager' => 'VuFind\Search\Options\PluginManager',
'VuFind\SearchParamsPluginManager' => 'VuFind\Search\Params\PluginManager',
'VuFind\SearchResultsPluginManager' => 'VuFind\Search\Results\PluginManager',
'VuFind\SearchRunner' => 'VuFind\Search\SearchRunner',
'VuFind\SearchSpecsReader' => 'VuFind\Config\SearchSpecsReader',
'VuFind\SearchTabsHelper' => 'VuFind\Search\SearchTabsHelper',
'VuFind\SessionManager' => 'Laminas\Session\SessionManager',
'VuFind\SessionPluginManager' => 'VuFind\Session\PluginManager',
'VuFind\SMS' => 'VuFind\SMS\SMSInterface',
'VuFind\Translator' => 'Laminas\Mvc\I18n\Translator',
'VuFind\WorldCatUtils' => 'VuFind\Connection\WorldCatUtils',
'VuFind\YamlReader' => 'VuFind\Config\YamlReader',
'Laminas\Validator\Csrf' => 'VuFind\Validator\Csrf',
],
],
'translator' => [],
'translator_plugins' => [
'factories' => [
'VuFind\I18n\Translator\Loader\ExtendedIni' => 'VuFind\I18n\Translator\Loader\ExtendedIniFactory',
],
'aliases' => [
'ExtendedIni' => 'VuFind\I18n\Translator\Loader\ExtendedIni'
],
],
'view_helpers' => [
'initializers' => [
'VuFind\ServiceManager\ServiceInitializer',
],
],
'view_manager' => [
'display_not_found_reason' => APPLICATION_ENV == 'development',
'display_exceptions' => APPLICATION_ENV == 'development',
'not_found_template' => 'error/404',
'exception_template' => 'error/index',
'template_path_stack' => [],
'whoops_no_catch' => [
'VuFind\Exception\RecordMissing',
],
],
// This section contains all VuFind-specific settings (i.e. configurations
// unrelated to specific Laminas components).
'vufind' => [
// The config reader is a special service manager for loading .ini files:
'config_reader' => [ /* see VuFind\Config\PluginManager for defaults */ ],
// PostgreSQL sequence mapping
'pgsql_seq_mapping' => [
'auth_hash' => ['id', 'auth_hash_id_seq'],
'comments' => ['id', 'comments_id_seq'],
'external_session' => ['id', 'external_session_id_seq'],
'oai_resumption' => ['id', 'oai_resumption_id_seq'],
'record' => ['id', 'record_id_seq'],
'resource' => ['id', 'resource_id_seq'],
'resource_tags' => ['id', 'resource_tags_id_seq'],
'search' => ['id', 'search_id_seq'],
'session' => ['id', 'session_id_seq'],
'shortlinks' => ['id', 'shortlinks_id_seq'],
'tags' => ['id', 'tags_id_seq'],
'user' => ['id', 'user_id_seq'],
'user_card' => ['id', 'user_card_id_seq'],
'user_list' => ['id', 'user_list_id_seq'],
'user_resource' => ['id', 'user_resource_id_seq'],
],
// This section contains service manager configurations for all VuFind
// pluggable components:
'plugin_managers' => [
'ajaxhandler' => [ /* see VuFind\AjaxHandler\PluginManager for defaults */ ],
'auth' => [ /* see VuFind\Auth\PluginManager for defaults */ ],
'autocomplete' => [ /* see VuFind\Autocomplete\PluginManager for defaults */ ],
'captcha' => [ /* see VuFind\Captcha\PluginManager for defaults */ ],
'channelprovider' => [ /* see VuFind\ChannelProvider\PluginManager for defaults */ ],
'content' => [ /* see VuFind\Content\PluginManager for defaults */ ],
'content_authornotes' => [ /* see VuFind\Content\AuthorNotes\PluginManager for defaults */ ],
'content_covers' => [ /* see VuFind\Content\Covers\PluginManager for defaults */ ],
'content_excerpts' => [ /* see VuFind\Content\Excerpts\PluginManager for defaults */ ],
'content_reviews' => [ /* see VuFind\Content\Reviews\PluginManager for defaults */ ],
'content_summaries' => [ /* see VuFind\Content\Summaries\PluginManager for defaults */ ],
'content_toc' => [ /* see VuFind\Content\TOC\PluginManager for defaults */ ],
'contentblock' => [ /* see VuFind\ContentBlock\PluginManager for defaults */ ],
'cover_layer' => [ /* see VuFind\Cover\Layer\PluginManager for defaults */ ],
'db_row' => [ /* see VuFind\Db\Row\PluginManager for defaults */ ],
'db_table' => [ /* see VuFind\Db\Table\PluginManager for defaults */ ],
'doilinker' => [ /* see VuFind\DoiLinker\PluginManager for defaults */ ],
'hierarchy_driver' => [ /* see VuFind\Hierarchy\Driver\PluginManager for defaults */ ],
'hierarchy_treedataformatter' => [ /* see VuFind\Hierarchy\TreeDataFormatter\PluginManager for defaults */ ],
'hierarchy_treedatasource' => [ /* see VuFind\Hierarchy\TreeDataSource\PluginManager for defaults */ ],
'hierarchy_treerenderer' => [ /* see VuFind\Hierarchy\TreeRenderer\PluginManager for defaults */ ],
'ils_driver' => [ /* See VuFind\ILS\Driver\PluginManager for defaults */ ],
'metadatavocabulary' => [ /* See VuFind\MetadataVocabulary\PluginManager for defaults */],
'recommend' => [ /* See VuFind\Recommend\PluginManager for defaults */ ],
'record_fallbackloader' => [ /* See VuFind\Record\FallbackLoader\PluginManager for defaults */ ],
'recorddriver' => [ /* See VuFind\RecordDriver\PluginManager for defaults */ ],
'recordtab' => [ /* See VuFind\RecordTab\PluginManager for defaults */ ],
'related' => [ /* See VuFind\Related\PluginManager for defaults */ ],
'resolver_driver' => [ /* See VuFind\Resolver\Driver\PluginManager for defaults */ ],
'search_backend' => [ /* See VuFind\Search\BackendRegistry for defaults */ ],
'search_facetcache' => [ /* See VuFind\Search\FacetCache\PluginManager for defaults */ ],
'search_options' => [ /* See VuFind\Search\Options\PluginManager for defaults */ ],
'search_params' => [ /* See VuFind\Search\Params\PluginManager for defaults */ ],
'search_results' => [ /* See VuFind\Search\Results\PluginManager for defaults */ ],
'session' => [ /* see VuFind\Session\PluginManager for defaults */ ],
'sitemap' => [ /* see VuFind\Sitemap\PluginManager for defaults */ ],
'urlshortener' => [ /* see VuFind\UrlShortener\PluginManager for defaults */ ],
],
],
// Authorization configuration:
'lmc_rbac' => [
'identity_provider' => 'VuFind\Auth\Manager',
'guest_role' => 'guest',
'role_provider' => [
'VuFind\Role\DynamicRoleProvider' => [
'map_legacy_settings' => true,
],
],
'role_provider_manager' => [
'factories' => [
'VuFind\Role\DynamicRoleProvider' => 'VuFind\Role\DynamicRoleProviderFactory',
],
],
'vufind_permission_provider_manager' => [ /* see VuFind\Role\PermissionProvider\PluginManager for defaults */ ],
],
];
// Define record view routes -- route name => controller
$recordRoutes = [
'record' => 'Record',
'collection' => 'Collection',
'edsrecord' => 'EdsRecord',
'eitrecord' => 'EITRecord',
'missingrecord' => 'MissingRecord',
'primorecord' => 'PrimoRecord',
'solrauthrecord' => 'Authority',
'summonrecord' => 'SummonRecord',
'worldcatrecord' => 'WorldcatRecord',
'search2record' => 'Search2Record',
'search2collection' => 'Search2Collection',
'search2collectionrecord' => 'Search2Record',
];
// Define dynamic routes -- controller => [route name => action]
$dynamicRoutes = [
'Feedback' => ['feedback-form' => 'Form/[:id]'],
'MyResearch' => ['userList' => 'MyList/[:id]', 'editList' => 'EditList/[:id]'],
'LibraryCards' => ['editLibraryCard' => 'editCard/[:id]'],
];
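// Illustrative sketch (added for context, not in the original file): each
// dynamic route entry above is expected to expand into a segment route under
// its controller, comparable to the literal routes defined earlier. For
// example, a generated entry for 'feedback-form' would look roughly like:
//
// $config['router']['routes']['feedback-form'] = [
//     'type' => 'Laminas\Router\Http\Segment',
//     'options' => [
//         'route' => '/Feedback/Form/[:id]',
//         'defaults' => ['controller' => 'Feedback', 'action' => 'Form'],
//     ],
// ];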
// Define static routes -- Controller/Action strings
$staticRoutes = [
'Alphabrowse/Home', 'Author/FacetList', 'Author/Home', 'Author/Search',
'Authority/FacetList', 'Authority/Home', 'Authority/Record', 'Authority/Search',
'Browse/Author', 'Browse/Dewey', 'Browse/Era', 'Browse/Genre', 'Browse/Home',
'Browse/LCC', 'Browse/Region', 'Browse/Tag', 'Browse/Topic', 'Cart/doExport',
'BrowZine/Home', 'BrowZine/Search',
'Cart/Email', 'Cart/Export', 'Cart/Home', 'Cart/MyResearchBulk',
'Cart/Processor', 'Cart/Save', 'Cart/SearchResultsBulk',
'Channels/Home', 'Channels/Record', 'Channels/Search',
'Collections/ByTitle',
'Collections/Home', 'Combined/Home', 'Combined/Results', 'Combined/SearchBox',
'Confirm/Confirm', 'Cover/Show', 'Cover/Unavailable',
'EDS/Advanced', 'EDS/Home', 'EDS/Search',
'EIT/Advanced', 'EIT/Home', 'EIT/Search',
'Error/PermissionDenied', 'Error/Unavailable',
'Feedback/Email', 'Feedback/Home', 'Help/Home',
'Install/Done', 'Install/FixBasicConfig', 'Install/FixCache',
'Install/FixDatabase', 'Install/FixDependencies', 'Install/FixILS',
'Install/FixSecurity', 'Install/FixSolr', 'Install/FixSSLCerts', 'Install/Home',
'Install/PerformSecurityFix', 'Install/ShowSQL',
'LibGuides/Home', 'LibGuides/Results',
'LibraryCards/Home', 'LibraryCards/SelectCard',
'LibraryCards/ConnectCard', 'LibraryCards/ConnectCardLogin',
'LibraryCards/DeleteCard',
'MyResearch/Account', 'MyResearch/ChangeEmail', 'MyResearch/ChangePassword',
'MyResearch/CheckedOut', 'MyResearch/Delete', 'MyResearch/DeleteAccount',
'MyResearch/DeleteList', 'MyResearch/Edit', 'MyResearch/Email',
'MyResearch/EmailNotVerified', 'MyResearch/Favorites',
'MyResearch/Fines', 'MyResearch/HistoricLoans', 'MyResearch/Holds',
'MyResearch/Home', 'MyResearch/ILLRequests', 'MyResearch/Logout',
'MyResearch/NewPassword', 'MyResearch/Profile',
'MyResearch/Recover', 'MyResearch/SaveSearch',
'MyResearch/StorageRetrievalRequests',
'MyResearch/Unsubscribe', 'MyResearch/UserLogin',
'MyResearch/Verify', 'MyResearch/VerifyEmail', 'OAI/Server',
    'Overdrive/MyContent', 'Overdrive/Hold',
'Pazpar2/Home', 'Pazpar2/Search',
'Primo/Advanced', 'Primo/Home', 'Primo/Search',
'QRCode/Show', 'QRCode/Unavailable', 'Records/Home',
'Relais/Login', 'Relais/Request',
'Search/Advanced', 'Search/CollectionFacetList',
'Search/EditMemory', 'Search/Email',
'Search/FacetList', 'Search/History', 'Search/Home', 'Search/NewItem',
'Search/OpenSearch', 'Search/Reserves', 'Search/ReservesFacetList',
'Search/Results', 'Search/Suggest', 'Search/Versions',
'Search2/Advanced', 'Search2/FacetList', 'Search2/Home', 'Search2/Results',
'Search2/Versions',
'Summon/Advanced', 'Summon/FacetList', 'Summon/Home', 'Summon/Search',
'Tag/Home',
'Upgrade/Home', 'Upgrade/FixAnonymousTags', 'Upgrade/FixDuplicateTags',
'Upgrade/FixConfig', 'Upgrade/FixDatabase', 'Upgrade/FixMetadata',
'Upgrade/GetDBCredentials', 'Upgrade/GetDbEncodingPreference',
'Upgrade/GetSourceDir', 'Upgrade/GetSourceVersion', 'Upgrade/Reset',
'Upgrade/ShowSQL',
'Web/Home', 'Web/FacetList', 'Web/Results',
'Worldcat/Advanced', 'Worldcat/Home', 'Worldcat/Search'
];
$routeGenerator = new \VuFind\Route\RouteGenerator();
$routeGenerator->addRecordRoutes($config, $recordRoutes);
$routeGenerator->addDynamicRoutes($config, $dynamicRoutes);
$routeGenerator->addStaticRoutes($config, $staticRoutes);
// Add the home route last
$config['router']['routes']['home'] = [
'type' => 'Laminas\Router\Http\Literal',
'options' => [
'route' => '/',
'defaults' => [
'controller' => 'index',
'action' => 'Home',
]
]
];
return $config;
| 1 | 31,805 | How would you feel about continuing to point this at the MyResearchController's holdsAction, but instead changing that action to force a redirect to the new holds-list route? That way, people will get sent to the new URL instead of having two different URLs that do the same thing. | vufind-org-vufind | php |
@@ -1196,6 +1196,14 @@ func (c *client) markConnAsClosed(reason ClosedState, skipFlush bool) bool {
if skipFlush {
c.flags.set(skipFlushOnClose)
}
+ // Be consistent with the creation: for routes and gateways,
+ // we use Noticef on create, so use that too for delete.
+ if c.kind == ROUTER || c.kind == GATEWAY {
+ c.Noticef("%s connection closed: %v", c.typeString(), reason.String())
+ } else { // Client and Leaf Node connections.
+ c.Debugf("%s connection closed: %v", c.typeString(), reason.String())
+ }
+
// Save off the connection if it's a client or leafnode.
if c.kind == CLIENT || c.kind == LEAF {
if nc := c.nc; nc != nil && c.srv != nil { | 1 | // Copyright 2012-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt"
)
// Type of client connection.
const (
// CLIENT is an end user.
CLIENT = iota
// ROUTER represents another server in the cluster.
ROUTER
// GATEWAY is a link between 2 clusters.
GATEWAY
// SYSTEM is an internal system client.
SYSTEM
// LEAF is for leaf node connections.
LEAF
)
const (
// ClientProtoZero is the original Client protocol from 2009.
// http://nats.io/documentation/internals/nats-protocol/
ClientProtoZero = iota
// ClientProtoInfo signals a client can receive more than the original INFO block.
// This can be used to update clients on other cluster members, etc.
ClientProtoInfo
)
const (
pingProto = "PING" + _CRLF_
pongProto = "PONG" + _CRLF_
errProto = "-ERR '%s'" + _CRLF_
okProto = "+OK" + _CRLF_
)
func init() {
rand.Seed(time.Now().UnixNano())
}
const (
// Scratch buffer size for the processMsg() calls.
msgScratchSize = 1024
msgHeadProto = "RMSG "
msgHeadProtoLen = len(msgHeadProto)
// For controlling dynamic buffer sizes.
startBufSize = 512 // For INFO/CONNECT block
minBufSize = 64 // Smallest to shrink to for PING/PONG
maxBufSize = 65536 // 64k
shortsToShrink = 2 // Trigger to shrink dynamic buffers
maxFlushPending = 10 // Max fsps to have in order to wait for writeLoop
readLoopReport = 2 * time.Second
// Server should not send a PING (for RTT) before the first PONG has
// been sent to the client. However, in case some client libs don't
// send CONNECT+PING, cap the maximum time before server can send
// the RTT PING.
maxNoRTTPingBeforeFirstPong = 2 * time.Second
// For stalling fast producers
stallClientMinDuration = 100 * time.Millisecond
stallClientMaxDuration = time.Second
)
var readLoopReportThreshold = readLoopReport
// Represent client booleans with a bitmask
type clientFlag uint16
// Some client state represented as flags
const (
connectReceived clientFlag = 1 << iota // The CONNECT proto has been received
infoReceived // The INFO protocol has been received
firstPongSent // The first PONG has been sent
handshakeComplete // For TLS clients, indicate that the handshake is complete
flushOutbound // Marks client as having a flushOutbound call in progress.
noReconnect // Indicate that on close, this connection should not attempt a reconnect
closeConnection // Marks that closeConnection has already been called.
writeLoopStarted // Marks that the writeLoop has been started.
skipFlushOnClose // Marks that flushOutbound() should not be called on connection close.
expectConnect // Marks if this connection is expected to send a CONNECT
)
// set the flag (would be equivalent to set the boolean to true)
func (cf *clientFlag) set(c clientFlag) {
*cf |= c
}
// clear the flag (would be equivalent to set the boolean to false)
func (cf *clientFlag) clear(c clientFlag) {
*cf &= ^c
}
// isSet returns true if the flag is set, false otherwise
func (cf clientFlag) isSet(c clientFlag) bool {
return cf&c != 0
}
// setIfNotSet will set the flag `c` only if that flag was not already
// set and return true to indicate that the flag has been set. Returns
// false otherwise.
func (cf *clientFlag) setIfNotSet(c clientFlag) bool {
if *cf&c == 0 {
*cf |= c
return true
}
return false
}
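// A minimal illustration (hypothetical usage, not part of the server flow) of
// how the bitmask helpers above compose. Each flag is a distinct bit, so
// several can be stored and tested in one uint16:
//
//	var f clientFlag
//	f.set(connectReceived | firstPongSent) // sets two flags at once
//	f.isSet(connectReceived)               // true
//	f.clear(firstPongSent)
//	f.setIfNotSet(connectReceived)         // false: already set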
// ClosedState is the reason client was closed. This will
// be passed into calls to clearConnection, but will only
// be stored in ConnInfo for monitoring.
type ClosedState int
const (
ClientClosed = ClosedState(iota + 1)
AuthenticationTimeout
AuthenticationViolation
TLSHandshakeError
SlowConsumerPendingBytes
SlowConsumerWriteDeadline
WriteError
ReadError
ParseError
StaleConnection
ProtocolViolation
BadClientProtocolVersion
WrongPort
MaxAccountConnectionsExceeded
MaxConnectionsExceeded
MaxPayloadExceeded
MaxControlLineExceeded
MaxSubscriptionsExceeded
DuplicateRoute
RouteRemoved
ServerShutdown
AuthenticationExpired
WrongGateway
MissingAccount
Revocation
)
// Some flags passed to processMsgResultsEx
const pmrNoFlag int = 0
const (
pmrCollectQueueNames int = 1 << iota
pmrIgnoreEmptyQueueFilter
pmrAllowSendFromRouteToRoute
)
type client struct {
// Here first because of use of atomics, and memory alignment.
stats
// Indicate if we should check gwrm or not. Since checking gwrm is done
// when processing inbound messages and requires the lock we want to
// check only when needed. This is set/get using atomic, so needs to
// be memory aligned.
cgwrt int32
mpay int32
msubs int32
mcl int32
mu sync.Mutex
kind int
cid uint64
opts clientOpts
start time.Time
nonce []byte
nc net.Conn
ncs string
out outbound
srv *Server
acc *Account
user *NkeyUser
host string
port uint16
subs map[string]*subscription
perms *permissions
replies map[string]*resp
mperms *msgDeny
darray []string
in readCache
pcd map[*client]struct{}
atmr *time.Timer
ping pinfo
msgb [msgScratchSize]byte
last time.Time
parseState
rtt time.Duration
rttStart time.Time
rrTracking map[string]*remoteLatency
rrMax int
route *route
gw *gateway
leaf *leaf
// To keep track of gateway replies mapping
gwrm map[string]*gwReplyMap
flags clientFlag // Compact booleans into a single field. Size will be increased when needed.
trace bool
echo bool
}
// Struct for PING initiation from the server.
type pinfo struct {
tmr *time.Timer
last time.Time
out int
}
// outbound holds pending data for a socket.
type outbound struct {
p []byte // Primary write buffer
s []byte // Secondary for use post flush
nb net.Buffers // net.Buffers for writev IO
sz int32 // limit size per []byte, uses variable BufSize constants, start, min, max.
sws int32 // Number of short writes, used for dynamic resizing.
pb int64 // Total pending/queued bytes.
pm int32 // Total pending/queued messages.
fsp int32 // Flush signals that are pending per producer from readLoop's pcd.
sch chan struct{} // To signal writeLoop that there is data to flush.
wdl time.Duration // Snapshot of write deadline.
mp int64 // Snapshot of max pending for client.
lft time.Duration // Last flush time for Write.
stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in.
lwb int32 // Last byte size of Write.
}
type perm struct {
allow *Sublist
deny *Sublist
}
type permissions struct {
sub perm
pub perm
resp *ResponsePermission
pcache map[string]bool
}
// This is used to dynamically track responses and reply subjects
// for dynamic permissioning.
type resp struct {
t time.Time
n int
}
// msgDeny is used when a user permission for subscriptions has a deny
// clause but a subscription could be made that is of broader scope.
// e.g. deny = "foo", but user subscribes to "*". That subscription should
// succeed but no message sent on foo should be delivered.
type msgDeny struct {
deny *Sublist
dcache map[string]bool
}
// routeTarget collects information regarding routes and queue groups for
// sending information to a remote.
type routeTarget struct {
sub *subscription
qs []byte
_qs [32]byte
}
const (
maxResultCacheSize = 512
maxDenyPermCacheSize = 256
maxPermCacheSize = 128
pruneSize = 32
routeTargetInit = 8
replyPermLimit = 4096
)
// Used in readloop to cache hot subject lookups and group statistics.
type readCache struct {
// These are for clients who are bound to a single account.
genid uint64
results map[string]*SublistResult
// This is for routes and gateways to have their own L1 as well that is account aware.
pacache map[string]*perAccountCache
// This is for when we deliver messages across a route. We use this structure
// to make sure to only send one message and properly scope to queues as needed.
rts []routeTarget
prand *rand.Rand
// These are all temporary totals for an invocation of a read in readloop.
msgs int32
bytes int32
subs int32
rsz int32 // Read buffer size
srs int32 // Short reads, used for dynamic buffer resizing.
}
const (
defaultMaxPerAccountCacheSize = 4096
defaultPrunePerAccountCacheSize = 256
defaultClosedSubsCheckInterval = 5 * time.Minute
)
var (
maxPerAccountCacheSize = defaultMaxPerAccountCacheSize
prunePerAccountCacheSize = defaultPrunePerAccountCacheSize
closedSubsCheckInterval = defaultClosedSubsCheckInterval
)
// perAccountCache is for L1 semantics for inbound messages from a route or gateway to mimic the performance of clients.
type perAccountCache struct {
acc *Account
results *SublistResult
genid uint64
}
func (c *client) String() (id string) {
return c.ncs
}
// GetName returns the application supplied name for the connection.
func (c *client) GetName() string {
c.mu.Lock()
name := c.opts.Name
c.mu.Unlock()
return name
}
// GetOpts returns the client options provided by the application.
func (c *client) GetOpts() *clientOpts {
return &c.opts
}
// GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil
// otherwise. Implements the ClientAuth interface.
func (c *client) GetTLSConnectionState() *tls.ConnectionState {
tc, ok := c.nc.(*tls.Conn)
if !ok {
return nil
}
state := tc.ConnectionState()
return &state
}
// This is the main subscription struct that indicates
// interest in published messages.
// FIXME(dlc) - This is getting bloated for normal subs, need
// to optionally have an opts section for non-normal stuff.
type subscription struct {
client *client
im *streamImport // This is for import stream support.
shadow []*subscription // This is to track shadowed accounts.
subject []byte
queue []byte
sid []byte
nm int64
max int64
qw int32
closed int32
}
// Indicate that this subscription is closed.
// This is used in pruning of route and gateway cache items.
func (s *subscription) close() {
atomic.StoreInt32(&s.closed, 1)
}
// Return true if this subscription was unsubscribed
// or its connection has been closed.
func (s *subscription) isClosed() bool {
return atomic.LoadInt32(&s.closed) == 1
}
type clientOpts struct {
Echo bool `json:"echo"`
Verbose bool `json:"verbose"`
Pedantic bool `json:"pedantic"`
TLSRequired bool `json:"tls_required"`
Nkey string `json:"nkey,omitempty"`
JWT string `json:"jwt,omitempty"`
Sig string `json:"sig,omitempty"`
Authorization string `json:"auth_token,omitempty"`
Username string `json:"user,omitempty"`
Password string `json:"pass,omitempty"`
Name string `json:"name"`
Lang string `json:"lang"`
Version string `json:"version"`
Protocol int `json:"protocol"`
Account string `json:"account,omitempty"`
AccountNew bool `json:"new_account,omitempty"`
// Routes only
Import *SubjectPermission `json:"import,omitempty"`
Export *SubjectPermission `json:"export,omitempty"`
}
var defaultOpts = clientOpts{Verbose: true, Pedantic: true, Echo: true}
var internalOpts = clientOpts{Verbose: false, Pedantic: false, Echo: false}
func (c *client) setTraceLevel() {
if c.kind == SYSTEM && !(atomic.LoadInt32(&c.srv.logging.traceSysAcc) != 0) {
c.trace = false
} else {
c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0)
}
}
// Lock should be held
func (c *client) initClient() {
s := c.srv
c.cid = atomic.AddUint64(&s.gcid, 1)
// Outbound data structure setup
c.out.sz = startBufSize
c.out.sch = make(chan struct{}, 1)
opts := s.getOpts()
// Snapshots to avoid mutex access in fast paths.
c.out.wdl = opts.WriteDeadline
c.out.mp = opts.MaxPending
c.subs = make(map[string]*subscription)
c.echo = true
c.setTraceLevel()
// This is a scratch buffer used for processMsg()
// The msg header starts with "RMSG ", which can be used
// for both local and routes.
// in bytes that is [82 77 83 71 32].
c.msgb = [msgScratchSize]byte{82, 77, 83, 71, 32}
// This is to track pending clients that have data to be flushed
// after we process inbound msgs from our own connection.
c.pcd = make(map[*client]struct{})
// snapshot the string version of the connection
var conn string
if ip, ok := c.nc.(*net.TCPConn); ok {
conn = ip.RemoteAddr().String()
host, port, _ := net.SplitHostPort(conn)
iPort, _ := strconv.Atoi(port)
c.host, c.port = host, uint16(iPort)
}
switch c.kind {
case CLIENT:
c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid)
case ROUTER:
c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid)
case GATEWAY:
c.ncs = fmt.Sprintf("%s - gid:%d", conn, c.cid)
case LEAF:
c.ncs = fmt.Sprintf("%s - lid:%d", conn, c.cid)
case SYSTEM:
c.ncs = "SYSTEM"
}
}
// RemoteAddress exposes the address of the client connection,
// or nil when not connected or unknown.
func (c *client) RemoteAddress() net.Addr {
c.mu.Lock()
defer c.mu.Unlock()
if c.nc == nil {
return nil
}
return c.nc.RemoteAddr()
}
// Helper function to report errors.
func (c *client) reportErrRegisterAccount(acc *Account, err error) {
if err == ErrTooManyAccountConnections {
c.maxAccountConnExceeded()
return
}
c.Errorf("Problem registering with account [%s]", acc.Name)
c.sendErr("Failed Account Registration")
}
// registerWithAccount will register the given user with a specific
// account. This will change the subject namespace.
func (c *client) registerWithAccount(acc *Account) error {
if acc == nil || acc.sl == nil {
return ErrBadAccount
}
// If we were previously registered, usually to $G, do accounting here to remove.
if c.acc != nil {
if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil {
c.srv.decActiveAccounts()
}
}
c.mu.Lock()
kind := c.kind
srv := c.srv
c.acc = acc
c.applyAccountLimits()
c.mu.Unlock()
// Check if we have a max connections violation
if kind == CLIENT && acc.MaxTotalConnectionsReached() {
return ErrTooManyAccountConnections
} else if kind == LEAF && acc.MaxTotalLeafNodesReached() {
return ErrTooManyAccountConnections
}
// Add in new one.
if prev := acc.addClient(c); prev == 0 && srv != nil {
srv.incActiveAccounts()
}
return nil
}
// Helper to determine if we have met or exceeded max subs.
func (c *client) subsAtLimit() bool {
return c.msubs != jwt.NoLimit && len(c.subs) >= int(c.msubs)
}
// Apply account limits
// Lock is held on entry.
// FIXME(dlc) - Should server be able to override here?
func (c *client) applyAccountLimits() {
if c.acc == nil || (c.kind != CLIENT && c.kind != LEAF) {
return
}
// Set here; we will need to do checks for NoLimit.
if c.acc.msubs != jwt.NoLimit {
c.msubs = c.acc.msubs
}
if c.acc.mpay != jwt.NoLimit {
c.mpay = c.acc.mpay
}
s := c.srv
opts := s.getOpts()
// We check here if the server has an option set that is lower than the account limit.
if c.mpay != jwt.NoLimit && opts.MaxPayload != 0 && int32(opts.MaxPayload) < c.acc.mpay {
c.Errorf("Max Payload set to %d from server config which overrides %d from account claims", opts.MaxPayload, c.acc.mpay)
c.mpay = int32(opts.MaxPayload)
}
// We check here if the server has an option set that is lower than the account limit.
if c.msubs != jwt.NoLimit && opts.MaxSubs != 0 && opts.MaxSubs < int(c.acc.msubs) {
c.Errorf("Max Subscriptions set to %d from server config which overrides %d from account claims", opts.MaxSubs, c.acc.msubs)
c.msubs = int32(opts.MaxSubs)
}
if c.subsAtLimit() {
go func() {
c.maxSubsExceeded()
time.Sleep(20 * time.Millisecond)
c.closeConnection(MaxSubscriptionsExceeded)
}()
}
}
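// Worked example (hypothetical numbers): if the account claims allow a max
// payload of 2MB but the server config sets MaxPayload to 1MB, the check
// above logs the override and the client ends up with an mpay of 1MB. The
// same clamping applies to MaxSubs versus the account's msubs limit.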
// RegisterUser allows auth to call back into a new client
// with the authenticated user. This is used to map
// any permissions into the client and setup accounts.
func (c *client) RegisterUser(user *User) {
// Register with proper account and sublist.
if user.Account != nil {
if err := c.registerWithAccount(user.Account); err != nil {
c.reportErrRegisterAccount(user.Account, err)
return
}
}
c.mu.Lock()
// Assign permissions.
if user.Permissions == nil {
// Reset perms to nil in case client previously had them.
c.perms = nil
c.mperms = nil
} else {
c.setPermissions(user.Permissions)
}
c.mu.Unlock()
}
// RegisterNkeyUser allows auth to call back into a new nkey
// client with the authenticated user. This is used to map
// any permissions into the client and setup accounts.
func (c *client) RegisterNkeyUser(user *NkeyUser) error {
// Register with proper account and sublist.
if user.Account != nil {
if err := c.registerWithAccount(user.Account); err != nil {
c.reportErrRegisterAccount(user.Account, err)
return err
}
}
c.mu.Lock()
c.user = user
// Assign permissions.
if user.Permissions == nil {
// Reset perms to nil in case client previously had them.
c.perms = nil
c.mperms = nil
} else {
c.setPermissions(user.Permissions)
}
c.mu.Unlock()
return nil
}
func splitSubjectQueue(sq string) ([]byte, []byte, error) {
vals := strings.Fields(strings.TrimSpace(sq))
s := []byte(vals[0])
var q []byte
if len(vals) == 2 {
q = []byte(vals[1])
} else if len(vals) > 2 {
return nil, nil, fmt.Errorf("invalid subject-queue %q", sq)
}
return s, q, nil
}
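// For example (illustrative inputs): splitSubjectQueue("foo bar") returns
// subject "foo" and queue "bar"; splitSubjectQueue("foo") returns subject
// "foo" with a nil queue; and "foo bar baz" is rejected as an invalid
// subject-queue pair.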
// Initializes client.perms structure.
// Lock is held on entry.
func (c *client) setPermissions(perms *Permissions) {
if perms == nil {
return
}
c.perms = &permissions{}
c.perms.pcache = make(map[string]bool)
// Loop over publish permissions
if perms.Publish != nil {
if perms.Publish.Allow != nil {
c.perms.pub.allow = NewSublistWithCache()
}
for _, pubSubject := range perms.Publish.Allow {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.allow.Insert(sub)
}
if len(perms.Publish.Deny) > 0 {
c.perms.pub.deny = NewSublistWithCache()
}
for _, pubSubject := range perms.Publish.Deny {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.deny.Insert(sub)
}
}
// Check if we are allowed to send responses.
if perms.Response != nil {
rp := *perms.Response
c.perms.resp = &rp
c.replies = make(map[string]*resp)
}
// Loop over subscribe permissions
if perms.Subscribe != nil {
var err error
if len(perms.Subscribe.Allow) > 0 {
c.perms.sub.allow = NewSublistWithCache()
}
for _, subSubject := range perms.Subscribe.Allow {
sub := &subscription{}
sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
if err != nil {
c.Errorf("%s", err.Error())
continue
}
c.perms.sub.allow.Insert(sub)
}
if len(perms.Subscribe.Deny) > 0 {
c.perms.sub.deny = NewSublistWithCache()
// Also hold onto this array for later.
c.darray = perms.Subscribe.Deny
}
for _, subSubject := range perms.Subscribe.Deny {
sub := &subscription{}
sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
if err != nil {
c.Errorf("%s", err.Error())
continue
}
c.perms.sub.deny.Insert(sub)
}
}
}
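// A sketch (hypothetical policy) of what the structures built above encode:
//
//	perms := &Permissions{
//		Publish:   &SubjectPermission{Allow: []string{"orders.>"}},
//		Subscribe: &SubjectPermission{Allow: []string{"orders.*"}, Deny: []string{"orders.private"}},
//	}
//
// yields a publish allow sublist matching "orders.>", a subscribe allow
// sublist for "orders.*", and a subscribe deny sublist (with "orders.private"
// also retained in c.darray for later msgDeny filtering).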
// Check to see if we have an expiration for the user JWT via base claims.
// FIXME(dlc) - Clear on connect with new JWT.
func (c *client) checkExpiration(claims *jwt.ClaimsData) {
if claims.Expires == 0 {
return
}
tn := time.Now().Unix()
if claims.Expires < tn {
return
}
expiresAt := time.Duration(claims.Expires - tn)
c.setExpirationTimer(expiresAt * time.Second)
}
// This will load up the deny structure used for filtering delivered
// messages based on a deny clause for subscriptions.
// Lock should be held.
func (c *client) loadMsgDenyFilter() {
c.mperms = &msgDeny{NewSublistWithCache(), make(map[string]bool)}
for _, sub := range c.darray {
c.mperms.deny.Insert(&subscription{subject: []byte(sub)})
}
}
// writeLoop is the main socket write functionality.
// Runs in its own Go routine.
func (c *client) writeLoop() {
defer c.srv.grWG.Done()
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return
}
c.flags.set(writeLoopStarted)
ch := c.out.sch
c.mu.Unlock()
// This will clear connection state and remove it from the server.
defer c.teardownConn()
// Used to check that we did flush from last wake up.
waitOk := true
// Used to limit the wait for a signal
const maxWait = time.Second
t := time.NewTimer(maxWait)
var close bool
// Main loop. Will wait to be signaled and then will use
// buffered outbound structure for efficient writev to the underlying socket.
for {
c.mu.Lock()
if close = c.flags.isSet(closeConnection); !close {
owtf := c.out.fsp > 0 && c.out.pb < maxBufSize && c.out.fsp < maxFlushPending
if waitOk && (c.out.pb == 0 || owtf) {
c.mu.Unlock()
// Reset our timer
t.Reset(maxWait)
// Wait on pending data.
select {
case <-ch:
case <-t.C:
}
c.mu.Lock()
close = c.flags.isSet(closeConnection)
}
}
if close {
c.flushAndClose(false)
c.mu.Unlock()
return
}
// Flush data
waitOk = c.flushOutbound()
c.mu.Unlock()
}
}
// flushClients will make sure to flush any clients we may have
// sent to during processing. We pass in a budget as a time.Duration
// for how much time to spend in place flushing for this client. This
// will normally be called in the readLoop of the client who sent the
// message that now is being delivered.
func (c *client) flushClients(budget time.Duration) time.Time {
last := time.Now()
// Check pending clients for flush.
for cp := range c.pcd {
// TODO(dlc) - Wonder if it makes more sense to create a new map?
delete(c.pcd, cp)
// Queue up a flush for those in the set
cp.mu.Lock()
// Update last activity for message delivery
cp.last = last
// Remove ourselves from the pending list.
cp.out.fsp--
// Just ignore if this was closed.
if cp.flags.isSet(closeConnection) {
cp.mu.Unlock()
continue
}
if budget > 0 && cp.flushOutbound() {
budget -= cp.out.lft
} else {
cp.flushSignal()
}
cp.mu.Unlock()
}
return last
}
// readLoop is the main socket read functionality.
// Runs in its own Go routine.
func (c *client) readLoop() {
// Grab the connection off the client, it will be cleared on a close.
// We check for that after the loop, but want to avoid a nil dereference
c.mu.Lock()
s := c.srv
defer s.grWG.Done()
if c.isClosed() {
c.mu.Unlock()
return
}
nc := c.nc
c.in.rsz = startBufSize
// Snapshot max control line since currently can not be changed on reload and we
// were checking it on each call to parse. If this changes and we allow MaxControlLine
// to be reloaded without restart, this code will need to change.
c.mcl = MAX_CONTROL_LINE_SIZE
if s != nil {
if opts := s.getOpts(); opts != nil {
c.mcl = int32(opts.MaxControlLine)
}
}
// Check the per-account-cache for closed subscriptions
cpacc := c.kind == ROUTER || c.kind == GATEWAY
// Last per-account-cache check for closed subscriptions
lpacc := time.Now()
c.mu.Unlock()
defer func() {
// These are used only in the readloop, so we can set them to nil
// on exit of the readLoop.
c.in.results, c.in.pacache = nil, nil
}()
// Start read buffer.
b := make([]byte, c.in.rsz)
for {
n, err := nc.Read(b)
// If we have any data we will try to parse and exit at the end.
if n == 0 && err != nil {
c.closeConnection(closedStateForErr(err))
return
}
start := time.Now()
// Clear inbound stats cache
c.in.msgs = 0
c.in.bytes = 0
c.in.subs = 0
// Main call into parser for inbound data. This will generate callouts
// to process messages, etc.
if err := c.parse(b[:n]); err != nil {
if dur := time.Since(start); dur >= readLoopReportThreshold {
c.Warnf("Readloop processing time: %v", dur)
}
// Need to call flushClients because some of the clients have been
// assigned messages and their "fsp" incremented, and need now to be
// decremented and their writeLoop signaled.
c.flushClients(0)
// handled inline
if err != ErrMaxPayload && err != ErrAuthentication {
c.Error(err)
c.closeConnection(ProtocolViolation)
}
return
}
// Updates stats for client and server that were collected
// from parsing through the buffer.
if c.in.msgs > 0 {
atomic.AddInt64(&c.inMsgs, int64(c.in.msgs))
atomic.AddInt64(&c.inBytes, int64(c.in.bytes))
atomic.AddInt64(&s.inMsgs, int64(c.in.msgs))
atomic.AddInt64(&s.inBytes, int64(c.in.bytes))
}
// Budget to spend in place flushing outbound data.
// Client will be checked on several fronts to see
// if applicable. Routes and Gateways will never
// spend time flushing outbound in place.
var budget time.Duration
if c.kind == CLIENT {
budget = time.Millisecond
}
// Flush, or signal to writeLoop to flush to socket.
last := c.flushClients(budget)
// Update activity, check read buffer size.
c.mu.Lock()
closed := c.isClosed()
// Activity based on interest changes or data/msgs.
if c.in.msgs > 0 || c.in.subs > 0 {
c.last = last
}
if n >= cap(b) {
c.in.srs = 0
} else if n < cap(b)/2 { // divide by 2 b/c we want less than what we would shrink to.
c.in.srs++
}
// Update read buffer size as/if needed.
if n >= cap(b) && cap(b) < maxBufSize {
// Grow
c.in.rsz = int32(cap(b) * 2)
b = make([]byte, c.in.rsz)
} else if n < cap(b) && cap(b) > minBufSize && c.in.srs > shortsToShrink {
// Shrink, for now don't accelerate, ping/pong will eventually sort it out.
c.in.rsz = int32(cap(b) / 2)
b = make([]byte, c.in.rsz)
}
c.mu.Unlock()
if dur := time.Since(start); dur >= readLoopReportThreshold {
c.Warnf("Readloop processing time: %v", dur)
}
// Check to see if we got closed, e.g. slow consumer
if closed {
return
}
// We could have had a read error from above but still read some data.
// If so do the close here unconditionally.
if err != nil {
c.closeConnection(closedStateForErr(err))
return
}
if cpacc && start.Sub(lpacc) >= closedSubsCheckInterval {
c.pruneClosedSubFromPerAccountCache()
lpacc = time.Now()
}
}
}
// Returns the appropriate closed state for a given read error.
func closedStateForErr(err error) ClosedState {
if err == io.EOF {
return ClientClosed
}
return ReadError
}
// collapsePtoNB will place primary onto nb buffer as needed in prep for WriteTo.
// This will return a copy on purpose.
func (c *client) collapsePtoNB() net.Buffers {
if c.out.p != nil {
p := c.out.p
c.out.p = nil
return append(c.out.nb, p)
}
return c.out.nb
}
// This will handle the fixup needed on a partial write.
// Assume pending has been already calculated correctly.
func (c *client) handlePartialWrite(pnb net.Buffers) {
nb := c.collapsePtoNB()
// The partial needs to be first, so append nb to pnb
c.out.nb = append(pnb, nb...)
}
// flushOutbound will flush outbound buffer to a client.
// Will return true if data was attempted to be written.
// Lock must be held
func (c *client) flushOutbound() bool {
if c.flags.isSet(flushOutbound) {
// For CLIENT connections, it is possible that the readLoop calls
// flushOutbound(). If writeLoop and readLoop compete and we are
// here we should release the lock to reduce the risk of spinning.
c.mu.Unlock()
runtime.Gosched()
c.mu.Lock()
return false
}
c.flags.set(flushOutbound)
defer c.flags.clear(flushOutbound)
// Check for nothing to do.
if c.nc == nil || c.srv == nil || c.out.pb == 0 {
return true // true because no need to queue a signal.
}
// Place primary on nb, assign primary to secondary, nil out nb and secondary.
nb := c.collapsePtoNB()
c.out.p, c.out.nb, c.out.s = c.out.s, nil, nil
// For selecting primary replacement.
cnb := nb
var lfs int
if len(cnb) > 0 {
lfs = len(cnb[0])
}
// In case it goes away after releasing the lock.
nc := c.nc
attempted := c.out.pb
apm := c.out.pm
// Capture this (we change the value in some tests)
wdl := c.out.wdl
// Do NOT hold lock during actual IO.
c.mu.Unlock()
// flush here
now := time.Now()
// FIXME(dlc) - writev will do multiple IOs past 1024 on
// most platforms, need to account for that with deadline?
nc.SetWriteDeadline(now.Add(wdl))
// Actual write to the socket.
n, err := nb.WriteTo(nc)
nc.SetWriteDeadline(time.Time{})
lft := time.Since(now)
// Re-acquire client lock.
c.mu.Lock()
if err != nil {
// Handle timeout error (slow consumer) differently
if ne, ok := err.(net.Error); ok && ne.Timeout() {
if closed := c.handleWriteTimeout(n, attempted, len(cnb)); closed {
return true
}
} else {
// Other errors will cause connection to be closed.
// For clients, report as debug but for others report as error.
report := c.Debugf
if c.kind != CLIENT {
report = c.Errorf
}
report("Error flushing: %v", err)
c.markConnAsClosed(WriteError, true)
return true
}
}
// Update flush time statistics.
c.out.lft = lft
c.out.lwb = int32(n)
// Subtract from pending bytes and messages.
c.out.pb -= int64(c.out.lwb)
c.out.pm -= apm // FIXME(dlc) - this will not be totally accurate on partials.
// Check for partial writes
// TODO(dlc) - zero write with no error will cause lost message and the writeloop to spin.
if int64(c.out.lwb) != attempted && n > 0 {
c.handlePartialWrite(nb)
} else if c.out.lwb >= c.out.sz {
c.out.sws = 0
}
// Adjust based on what we wrote plus any pending.
pt := int64(c.out.lwb) + c.out.pb
// Adjust sz as needed downward, keeping power of 2.
// We do this at a slower rate.
if pt < int64(c.out.sz) && c.out.sz > minBufSize {
c.out.sws++
if c.out.sws > shortsToShrink {
c.out.sz >>= 1
}
}
// Adjust sz as needed upward, keeping power of 2.
if pt > int64(c.out.sz) && c.out.sz < maxBufSize {
c.out.sz <<= 1
}
// Check to see if we can reuse buffers.
if lfs != 0 && n >= int64(lfs) {
oldp := cnb[0][:0]
if cap(oldp) >= int(c.out.sz) {
// Replace primary or secondary if they are nil, reusing same buffer.
if c.out.p == nil {
c.out.p = oldp
} else if c.out.s == nil || cap(c.out.s) < int(c.out.sz) {
c.out.s = oldp
}
}
}
// Check that if there is still data to send and writeLoop is in wait,
// then we need to signal.
if c.out.pb > 0 {
c.flushSignal()
}
// Check if we have a stalled gate and if so and we are recovering release
// any stalled producers. Only kind==CLIENT will stall.
if c.out.stc != nil && (int64(c.out.lwb) == attempted || c.out.pb < c.out.mp/2) {
close(c.out.stc)
c.out.stc = nil
}
return true
}
// This is invoked from flushOutbound() for io/timeout error (slow consumer).
// Returns a boolean to indicate if the connection has been closed or not.
// Lock is held on entry.
func (c *client) handleWriteTimeout(written, attempted int64, numChunks int) bool {
if tlsConn, ok := c.nc.(*tls.Conn); ok {
if !tlsConn.ConnectionState().HandshakeComplete {
// Likely a TLSTimeout error instead...
c.markConnAsClosed(TLSHandshakeError, true)
// Would need to coordinate with tlstimeout()
// to avoid double logging, so skip logging
// here, and don't report a slow consumer error.
return true
}
} else if c.flags.isSet(expectConnect) && !c.flags.isSet(connectReceived) {
// Under some conditions, a connection may hit a slow consumer write deadline
// before the authorization timeout. If that is the case, then we handle
// as slow consumer though we do not increase the counter as that can be
// misleading.
c.markConnAsClosed(SlowConsumerWriteDeadline, true)
return true
}
// Slow consumer here..
atomic.AddInt64(&c.srv.slowConsumers, 1)
c.Noticef("Slow Consumer Detected: WriteDeadline of %v exceeded with %d chunks of %d total bytes.",
c.out.wdl, numChunks, attempted)
// We always close CLIENT connections, or when nothing was written at all...
if c.kind == CLIENT || written == 0 {
c.markConnAsClosed(SlowConsumerWriteDeadline, true)
return true
}
return false
}
// Marks this connection as closed with the given reason.
// Sets the closeConnection flag and skipFlushOnClose flag if asked.
// Depending on the kind of connection, the connection will be saved.
// If a writeLoop has been started, the final flush/close/teardown will
// be done there, otherwise flush and close of TCP connection is done here in place.
// Returns true if closed in place, false otherwise.
// Lock is held on entry.
func (c *client) markConnAsClosed(reason ClosedState, skipFlush bool) bool {
if c.flags.isSet(closeConnection) {
return false
}
c.flags.set(closeConnection)
if skipFlush {
c.flags.set(skipFlushOnClose)
}
// Save off the connection if it's a client or leafnode.
if c.kind == CLIENT || c.kind == LEAF {
if nc := c.nc; nc != nil && c.srv != nil {
// TODO: May want to send events to single go routine instead
// of creating a new go routine for each save.
go c.srv.saveClosedClient(c, nc, reason)
}
}
// If writeLoop exists, let it do the final flush, close and teardown.
if c.flags.isSet(writeLoopStarted) {
c.flushSignal()
return false
}
// Flush (if skipFlushOnClose is not set) and close in place. If flushing,
// use a small WriteDeadline.
c.flushAndClose(true)
return true
}
// flushSignal will use server to queue the flush IO operation to a pool of flushers.
// Lock must be held.
func (c *client) flushSignal() bool {
select {
case c.out.sch <- struct{}{}:
return true
default:
}
return false
}
// Traces a message.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceMsg(msg []byte) {
maxTrace := c.srv.getOpts().MaxTracedMsgLen
if maxTrace > 0 && (len(msg)-LEN_CR_LF) > maxTrace {
c.Tracef("<<- MSG_PAYLOAD: [\"%s...\"]", msg[:maxTrace])
} else {
c.Tracef("<<- MSG_PAYLOAD: [%q]", msg[:len(msg)-LEN_CR_LF])
}
}
// Traces an incoming operation.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceInOp(op string, arg []byte) {
c.traceOp("<<- %s", op, arg)
}
// Traces an outgoing operation.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceOutOp(op string, arg []byte) {
c.traceOp("->> %s", op, arg)
}
func (c *client) traceOp(format, op string, arg []byte) {
opa := []interface{}{}
if op != "" {
opa = append(opa, op)
}
if arg != nil {
opa = append(opa, string(arg))
}
c.Tracef(format, opa)
}
// Process the information messages from Clients and other Routes.
func (c *client) processInfo(arg []byte) error {
info := Info{}
if err := json.Unmarshal(arg, &info); err != nil {
return err
}
switch c.kind {
case ROUTER:
c.processRouteInfo(&info)
case GATEWAY:
c.processGatewayInfo(&info)
case LEAF:
return c.processLeafnodeInfo(&info)
}
return nil
}
func (c *client) processErr(errStr string) {
close := true
switch c.kind {
case CLIENT:
c.Errorf("Client Error %s", errStr)
case ROUTER:
c.Errorf("Route Error %s", errStr)
case GATEWAY:
c.Errorf("Gateway Error %s", errStr)
case LEAF:
c.Errorf("Leafnode Error %s", errStr)
c.leafProcessErr(errStr)
close = false
}
if close {
c.closeConnection(ParseError)
}
}
// Password pattern matcher.
var passPat = regexp.MustCompile(`"?\s*pass\S*?"?\s*[:=]\s*"?(([^",\r\n}])*)`)
// removePassFromTrace removes any notion of passwords from trace
// messages for logging.
func removePassFromTrace(arg []byte) []byte {
if !bytes.Contains(arg, []byte(`pass`)) {
return arg
}
// Take a copy of the connect proto just for the trace message.
var _arg [4096]byte
buf := append(_arg[:0], arg...)
m := passPat.FindAllSubmatchIndex(buf, -1)
if len(m) == 0 {
return arg
}
redactedPass := []byte("[REDACTED]")
for _, i := range m {
if len(i) < 4 {
continue
}
start := i[2]
end := i[3]
// Replace password substring.
buf = append(buf[:start], append(redactedPass, buf[end:]...)...)
break
}
return buf
}
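// For example (illustrative input), tracing a connect line such as
//	CONNECT {"user":"derek","pass":"s3cr3t"}
// would be logged as
//	CONNECT {"user":"derek","pass":"[REDACTED]"}
// while the buffer handed to the parser is left untouched, since the
// redaction operates on a copy.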
// Returns the RTT by computing the elapsed time between now and `start`.
// On Windows VM where I (IK) run tests, time.Since() will return 0
// (I suspect some time granularity issues). So return at minimum 1ns.
func computeRTT(start time.Time) time.Duration {
rtt := time.Since(start)
if rtt <= 0 {
rtt = time.Nanosecond
}
return rtt
}
func (c *client) processConnect(arg []byte) error {
c.mu.Lock()
// If we can't stop the timer because the callback is in progress...
if !c.clearAuthTimer() {
// wait for it to finish and handle sending the failure back to
// the client.
for !c.isClosed() {
c.mu.Unlock()
time.Sleep(25 * time.Millisecond)
c.mu.Lock()
}
c.mu.Unlock()
return nil
}
c.last = time.Now()
// Estimate RTT to start.
if c.kind == CLIENT {
c.rtt = computeRTT(c.start)
if c.srv != nil {
c.clearPingTimer()
c.srv.setFirstPingTimer(c)
}
}
kind := c.kind
srv := c.srv
// Moved unmarshalling of clients' Options under the lock.
// The client has already been added to the server map, so it is possible
// that other routines lookup the client, and access its options under
// the client's lock, so unmarshalling the options outside of the lock
// would cause data RACEs.
if err := json.Unmarshal(arg, &c.opts); err != nil {
c.mu.Unlock()
return err
}
// Indicate that the CONNECT protocol has been received, and that the
// server now knows which protocol this client supports.
c.flags.set(connectReceived)
// Capture these under lock
c.echo = c.opts.Echo
proto := c.opts.Protocol
verbose := c.opts.Verbose
lang := c.opts.Lang
account := c.opts.Account
accountNew := c.opts.AccountNew
ujwt := c.opts.JWT
c.mu.Unlock()
if srv != nil {
// Applicable to clients only:
// As soon as c.opts is unmarshalled and if the proto is at
// least ClientProtoInfo, we need to increment the following counter.
// This is decremented when client is removed from the server's
// clients map.
if kind == CLIENT && proto >= ClientProtoInfo {
srv.mu.Lock()
srv.cproto++
srv.mu.Unlock()
}
// Check for Auth
if ok := srv.checkAuthentication(c); !ok {
// We may fail here because we reached max limits on an account.
if ujwt != "" {
c.mu.Lock()
acc := c.acc
c.mu.Unlock()
srv.mu.Lock()
tooManyAccCons := acc != nil && acc != srv.gacc
srv.mu.Unlock()
if tooManyAccCons {
return ErrTooManyAccountConnections
}
}
c.authViolation()
return ErrAuthentication
}
// Check for Account designation, this section should be only used when there is not a jwt.
if account != "" {
var acc *Account
var wasNew bool
var err error
if !srv.NewAccountsAllowed() {
acc, err = srv.LookupAccount(account)
if err != nil {
c.Errorf(err.Error())
c.sendErr(ErrMissingAccount.Error())
return err
} else if accountNew && acc != nil {
c.sendErrAndErr(ErrAccountExists.Error())
return ErrAccountExists
}
} else {
// We can create this one on the fly.
acc, wasNew = srv.LookupOrRegisterAccount(account)
if accountNew && !wasNew {
c.sendErrAndErr(ErrAccountExists.Error())
return ErrAccountExists
}
}
// If we are here we can register ourselves with the new account.
if err := c.registerWithAccount(acc); err != nil {
c.reportErrRegisterAccount(acc, err)
return ErrBadAccount
}
} else if c.acc == nil {
// By default register with the global account.
c.registerWithAccount(srv.gacc)
}
}
switch kind {
case CLIENT:
// Check client protocol request if it exists.
if proto < ClientProtoZero || proto > ClientProtoInfo {
c.sendErr(ErrBadClientProtocol.Error())
c.closeConnection(BadClientProtocolVersion)
return ErrBadClientProtocol
}
if verbose {
c.sendOK()
}
case ROUTER:
// Delegate the rest of processing to the route
return c.processRouteConnect(srv, arg, lang)
case GATEWAY:
// Delegate the rest of processing to the gateway
return c.processGatewayConnect(arg)
case LEAF:
// Delegate the rest of processing to the leaf node
return c.processLeafNodeConnect(srv, arg, lang)
}
return nil
}
func (c *client) sendErrAndErr(err string) {
c.sendErr(err)
c.Errorf(err)
}
func (c *client) sendErrAndDebug(err string) {
c.sendErr(err)
c.Debugf(err)
}
func (c *client) authTimeout() {
c.sendErrAndDebug("Authentication Timeout")
c.closeConnection(AuthenticationTimeout)
}
func (c *client) authExpired() {
c.sendErrAndDebug("User Authentication Expired")
c.closeConnection(AuthenticationExpired)
}
func (c *client) accountAuthExpired() {
c.sendErrAndDebug("Account Authentication Expired")
c.closeConnection(AuthenticationExpired)
}
func (c *client) authViolation() {
var s *Server
var hasTrustedNkeys, hasNkeys, hasUsers bool
if s = c.srv; s != nil {
s.mu.Lock()
hasTrustedNkeys = len(s.trustedKeys) > 0
hasNkeys = s.nkeys != nil
hasUsers = s.users != nil
s.mu.Unlock()
defer s.sendAuthErrorEvent(c)
}
if hasTrustedNkeys {
c.Errorf("%v", ErrAuthentication)
} else if hasNkeys {
c.Errorf("%s - Nkey %q",
ErrAuthentication.Error(),
c.opts.Nkey)
} else if hasUsers {
c.Errorf("%s - User %q",
ErrAuthentication.Error(),
c.opts.Username)
} else {
c.Errorf(ErrAuthentication.Error())
}
c.sendErr("Authorization Violation")
c.closeConnection(AuthenticationViolation)
}
func (c *client) maxAccountConnExceeded() {
c.sendErrAndErr(ErrTooManyAccountConnections.Error())
c.closeConnection(MaxAccountConnectionsExceeded)
}
func (c *client) maxConnExceeded() {
c.sendErrAndErr(ErrTooManyConnections.Error())
c.closeConnection(MaxConnectionsExceeded)
}
func (c *client) maxSubsExceeded() {
c.sendErrAndErr(ErrTooManySubs.Error())
}
func (c *client) maxPayloadViolation(sz int, max int32) {
c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max)
c.sendErr("Maximum Payload Violation")
c.closeConnection(MaxPayloadExceeded)
}
// queueOutbound queues data for a client connection.
// Returns whether the data is referenced or not. If referenced, the caller
// should not reuse the `data` array.
// Lock should be held.
func (c *client) queueOutbound(data []byte) bool {
// Do not keep going if closed
if c.flags.isSet(closeConnection) {
return false
}
// Assume data will not be referenced
referenced := false
// Add to pending bytes total.
c.out.pb += int64(len(data))
// Check for slow consumer via pending bytes limit.
// ok to return here, client is going away.
if c.kind == CLIENT && c.out.pb > c.out.mp {
// Perf wise, it looks like it is faster to optimistically add than
// checking current pb+len(data) and then add to pb.
c.out.pb -= int64(len(data))
atomic.AddInt64(&c.srv.slowConsumers, 1)
c.Noticef("Slow Consumer Detected: MaxPending of %d Exceeded", c.out.mp)
c.markConnAsClosed(SlowConsumerPendingBytes, true)
return referenced
}
if c.out.p == nil && len(data) < maxBufSize {
if c.out.sz == 0 {
c.out.sz = startBufSize
}
if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) {
c.out.p = c.out.s
c.out.s = nil
} else {
// FIXME(dlc) - make power of 2 if less than maxBufSize?
c.out.p = make([]byte, 0, c.out.sz)
}
}
// Determine if we copy or reference
available := cap(c.out.p) - len(c.out.p)
if len(data) > available {
// We can't fit everything into existing primary, but message will
// fit in next one we allocate or utilize from the secondary.
// So copy what we can.
if available > 0 && len(data) < int(c.out.sz) {
c.out.p = append(c.out.p, data[:available]...)
data = data[available:]
}
// Put the primary on the nb if it has a payload
if len(c.out.p) > 0 {
c.out.nb = append(c.out.nb, c.out.p)
c.out.p = nil
}
// Check for a big message, and if found place directly on nb
// FIXME(dlc) - do we need signaling of ownership here if we want len(data) < maxBufSize
if len(data) > maxBufSize {
c.out.nb = append(c.out.nb, data)
referenced = true
} else {
// We will copy to primary.
if c.out.p == nil {
// Grow here
if (c.out.sz << 1) <= maxBufSize {
c.out.sz <<= 1
}
if len(data) > int(c.out.sz) {
c.out.p = make([]byte, 0, len(data))
} else {
if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { // TODO(dlc) - Size mismatch?
c.out.p = c.out.s
c.out.s = nil
} else {
c.out.p = make([]byte, 0, c.out.sz)
}
}
}
c.out.p = append(c.out.p, data...)
}
} else {
c.out.p = append(c.out.p, data...)
}
// Check here if we should create a stall channel if we are falling behind.
// We do this here since if we wait for consumer's writeLoop it could be
// too late with large number of fan in producers.
if c.out.pb > c.out.mp/2 && c.out.stc == nil {
c.out.stc = make(chan struct{})
}
return referenced
}
// Assume the lock is held upon entry.
func (c *client) enqueueProtoAndFlush(proto []byte, doFlush bool) {
if c.isClosed() {
return
}
c.queueOutbound(proto)
if !(doFlush && c.flushOutbound()) {
c.flushSignal()
}
}
// Queues and then flushes the connection. This should only be called when
// the writeLoop cannot be started yet. Use enqueueProto() otherwise.
// Lock is held on entry.
func (c *client) sendProtoNow(proto []byte) {
c.enqueueProtoAndFlush(proto, true)
}
// Enqueues the given protocol and signals the writeLoop if necessary.
// Lock is held on entry.
func (c *client) enqueueProto(proto []byte) {
c.enqueueProtoAndFlush(proto, false)
}
// Assume the lock is held upon entry.
func (c *client) sendPong() {
if c.trace {
c.traceOutOp("PONG", nil)
}
c.enqueueProto([]byte(pongProto))
}
// Used to kick off a RTT measurement for latency tracking.
func (c *client) sendRTTPing() bool {
c.mu.Lock()
sent := c.sendRTTPingLocked()
c.mu.Unlock()
return sent
}
// Used to kick off a RTT measurement for latency tracking.
// This is normally called only when the caller has checked that
// the c.rtt is 0 and wants to force an update by sending a PING.
// Client lock held on entry.
func (c *client) sendRTTPingLocked() bool {
// Most client libs send a CONNECT+PING and wait for a PONG from the
// server. So if firstPongSent flag is set, it is ok for server to
// send the PING. But in case we have client libs that don't do that,
// allow the send of the PING if more than 2 secs have elapsed since
// the client TCP connection was accepted.
if !c.flags.isSet(closeConnection) &&
(c.flags.isSet(firstPongSent) || time.Since(c.start) > maxNoRTTPingBeforeFirstPong) {
c.sendPing()
return true
}
return false
}
// Assume the lock is held upon entry.
func (c *client) sendPing() {
c.rttStart = time.Now()
c.ping.out++
if c.trace {
c.traceOutOp("PING", nil)
}
c.enqueueProto([]byte(pingProto))
}
// Generates the INFO to be sent to the client with the client ID included.
// info arg will be copied since passed by value.
// Assume lock is held.
func (c *client) generateClientInfoJSON(info Info) []byte {
info.CID = c.cid
info.ClientIP = c.host
info.MaxPayload = c.mpay
// Generate the info json
b, _ := json.Marshal(info)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
return bytes.Join(pcs, []byte(" "))
}
func (c *client) sendErr(err string) {
c.mu.Lock()
if c.trace {
c.traceOutOp("-ERR", []byte(err))
}
c.enqueueProto([]byte(fmt.Sprintf(errProto, err)))
c.mu.Unlock()
}
func (c *client) sendOK() {
c.mu.Lock()
if c.trace {
c.traceOutOp("OK", nil)
}
c.enqueueProto([]byte(okProto))
c.pcd[c] = needFlush
c.mu.Unlock()
}
func (c *client) processPing() {
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return
}
c.sendPong()
// Record this to suppress us sending one if this
// is within a given time interval for activity.
c.ping.last = time.Now()
// If not a CLIENT, we are done. Also the CONNECT should
// have been received, but make sure it is so before proceeding
if c.kind != CLIENT || !c.flags.isSet(connectReceived) {
c.mu.Unlock()
return
}
// If we are here, the CONNECT has been received so we know
// if this client supports async INFO or not.
var (
checkInfoChange bool
srv = c.srv
)
// For older clients, just flip the firstPongSent flag if not already
// set and we are done.
if c.opts.Protocol < ClientProtoInfo || srv == nil {
c.flags.setIfNotSet(firstPongSent)
} else {
// This is a client that supports async INFO protocols.
// If this is the first PING (so firstPongSent is not set yet),
// we will need to check if there was a change in cluster topology
// or we have a different max payload. We will send this first before
// pong since most clients do flush after connect call.
checkInfoChange = !c.flags.isSet(firstPongSent)
}
c.mu.Unlock()
if checkInfoChange {
opts := srv.getOpts()
srv.mu.Lock()
c.mu.Lock()
// Now that we are under both locks, we can flip the flag.
// This prevents sendAsyncInfoToClients() and code here to
// send a double INFO protocol.
c.flags.set(firstPongSent)
// If there was a cluster update since this client was created,
// send an updated INFO protocol now.
if srv.lastCURLsUpdate >= c.start.UnixNano() || c.mpay != int32(opts.MaxPayload) {
c.enqueueProto(c.generateClientInfoJSON(srv.copyInfo()))
}
c.mu.Unlock()
srv.mu.Unlock()
}
}
func (c *client) processPong() {
c.mu.Lock()
c.ping.out = 0
c.rtt = computeRTT(c.rttStart)
srv := c.srv
reorderGWs := c.kind == GATEWAY && c.gw.outbound
c.mu.Unlock()
if reorderGWs {
srv.gateway.orderOutboundConnections()
}
}
func (c *client) processPub(arg []byte) error {
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_PUB_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 2:
c.pa.subject = args[0]
c.pa.reply = nil
c.pa.size = parseSize(args[1])
c.pa.szb = args[1]
case 3:
c.pa.subject = args[0]
c.pa.reply = args[1]
c.pa.size = parseSize(args[2])
c.pa.szb = args[2]
default:
return fmt.Errorf("processPub Parse Error: '%s'", arg)
}
// If number overruns an int64, parseSize() will have returned a negative value
if c.pa.size < 0 {
return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg)
}
maxPayload := atomic.LoadInt32(&c.mpay)
// Use int64() to avoid int32 overrun...
if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) {
c.maxPayloadViolation(c.pa.size, maxPayload)
return ErrMaxPayload
}
if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
c.sendErr("Invalid Publish Subject")
}
return nil
}
func splitArg(arg []byte) [][]byte {
a := [MAX_MSG_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
return args
}
func (c *client) processSub(argo []byte, noForward bool) (*subscription, error) {
// Indicate activity.
c.in.subs++
// Copy so we do not reference a potentially large buffer
// FIXME(dlc) - make more efficient.
arg := make([]byte, len(argo))
copy(arg, argo)
args := splitArg(arg)
sub := &subscription{client: c}
switch len(args) {
case 2:
sub.subject = args[0]
sub.queue = nil
sub.sid = args[1]
case 3:
sub.subject = args[0]
sub.queue = args[1]
sub.sid = args[2]
default:
return nil, fmt.Errorf("processSub Parse Error: '%s'", arg)
}
c.mu.Lock()
// Grab connection type, account and server info.
kind := c.kind
acc := c.acc
srv := c.srv
sid := string(sub.sid)
// This check does not apply to SYSTEM clients (because they don't have a `nc`...)
if kind != SYSTEM && c.isClosed() {
c.mu.Unlock()
return sub, nil
}
// Check permissions if applicable.
if kind == CLIENT {
// First do a pass whether queue subscription is valid. This does not necessarily
// mean that it will not be able to plain subscribe.
//
// allow = ["foo"] -> can subscribe or queue subscribe to foo using any queue
// allow = ["foo v1"] -> can only queue subscribe to 'foo v1', no plain subs allowed.
// allow = ["foo", "foo v1"] -> can subscribe to 'foo' but can only queue subscribe to 'foo v1'
//
if sub.queue != nil {
if !c.canQueueSubscribe(string(sub.subject), string(sub.queue)) {
c.mu.Unlock()
c.subPermissionViolation(sub)
return nil, nil
}
} else if !c.canSubscribe(string(sub.subject)) {
c.mu.Unlock()
c.subPermissionViolation(sub)
return nil, nil
}
}
// Check if we have a maximum on the number of subscriptions.
if c.subsAtLimit() {
c.mu.Unlock()
c.maxSubsExceeded()
return nil, nil
}
var updateGWs bool
var err error
// Subscribe here.
if c.subs[sid] == nil {
c.subs[sid] = sub
if acc != nil && acc.sl != nil {
err = acc.sl.Insert(sub)
if err != nil {
delete(c.subs, sid)
} else {
updateGWs = c.srv.gateway.enabled
}
}
}
// Unlocked from here onward
c.mu.Unlock()
if err != nil {
c.sendErr("Invalid Subject")
return nil, nil
} else if c.opts.Verbose && kind != SYSTEM {
c.sendOK()
}
// No account just return.
if acc == nil {
return sub, nil
}
if err := c.addShadowSubscriptions(acc, sub); err != nil {
c.Errorf(err.Error())
}
if noForward {
return sub, nil
}
// If we are routing and this is a local sub, add to the route map for the associated account.
if kind == CLIENT || kind == SYSTEM {
srv.updateRouteSubscriptionMap(acc, sub, 1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
}
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, 1)
return sub, nil
}
// If the client's account has stream imports and there are matches for
// this subscription's subject, then add shadow subscriptions in the
// other accounts that export this subject.
func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error {
if acc == nil {
return ErrMissingAccount
}
var (
rims [32]*streamImport
ims = rims[:0]
rfroms [32]*streamImport
froms = rfroms[:0]
tokens []string
tsa [32]string
hasWC bool
)
acc.mu.RLock()
// Loop over the import subjects. We have 3 scenarios. If we match
// exactly, or we know the proposed subject is a strict subset of the
// import, we can subscribe to the subscription's subject directly.
// The third scenario is where the proposed subject has a wildcard
// and may not be an exact subset, but is a match. Therefore we have to
// subscribe to the import subject, not the subscription's subject.
for _, im := range acc.imports.streams {
if im.invalid {
continue
}
subj := string(sub.subject)
if subj == im.prefix+im.from {
ims = append(ims, im)
continue
}
if tokens == nil {
tokens = tsa[:0]
start := 0
for i := 0; i < len(subj); i++ {
// This is not perfect, but the test below will
// be more exact, this is just to trigger the
// additional test.
if subj[i] == pwc || subj[i] == fwc {
hasWC = true
} else if subj[i] == btsep {
tokens = append(tokens, subj[start:i])
start = i + 1
}
}
tokens = append(tokens, subj[start:])
}
if isSubsetMatch(tokens, im.prefix+im.from) {
ims = append(ims, im)
} else if hasWC {
if subjectIsSubsetMatch(im.prefix+im.from, subj) {
froms = append(froms, im)
}
}
}
acc.mu.RUnlock()
var shadow []*subscription
if len(ims) > 0 || len(froms) > 0 {
shadow = make([]*subscription, 0, len(ims)+len(froms))
}
// Now walk through collected importMaps
for _, im := range ims {
// We will create a shadow subscription.
nsub, err := c.addShadowSub(sub, im, false)
if err != nil {
return err
}
shadow = append(shadow, nsub)
}
// Now walk through importMaps that we need to subscribe
// exactly to the "from" property.
for _, im := range froms {
// We will create a shadow subscription.
nsub, err := c.addShadowSub(sub, im, true)
if err != nil {
return err
}
shadow = append(shadow, nsub)
}
if shadow != nil {
c.mu.Lock()
sub.shadow = shadow
c.mu.Unlock()
}
return nil
}
// Add in the shadow subscription.
func (c *client) addShadowSub(sub *subscription, im *streamImport, useFrom bool) (*subscription, error) {
nsub := *sub // copy
nsub.im = im
if useFrom {
nsub.subject = []byte(im.from)
} else if im.prefix != "" {
// redo subject here to match subject in the publisher account space.
// Just remove prefix from what they gave us. That maps into other space.
nsub.subject = sub.subject[len(im.prefix):]
}
c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name)
if err := im.acc.sl.Insert(&nsub); err != nil {
errs := fmt.Sprintf("Could not add shadow import subscription for account %q", im.acc.Name)
c.Debugf(errs)
return nil, fmt.Errorf(errs)
}
// Update our route map here.
c.srv.updateRouteSubscriptionMap(im.acc, &nsub, 1)
c.srv.updateLeafNodes(im.acc, &nsub, 1)
return &nsub, nil
}
// canSubscribe determines if the client is authorized to subscribe to the
// given subject. Assumes caller is holding lock.
func (c *client) canSubscribe(subject string) bool {
if c.perms == nil {
return true
}
allowed := true
// Check allow list. If no allow list that means all are allowed. Deny can overrule.
if c.perms.sub.allow != nil {
r := c.perms.sub.allow.Match(subject)
allowed = len(r.psubs) != 0
}
// If we have a deny list and we think we are allowed, check that as well.
if allowed && c.perms.sub.deny != nil {
r := c.perms.sub.deny.Match(subject)
allowed = len(r.psubs) == 0
// We use the actual subscription to signal us to spin up the deny mperms
// and cache. We check if the subject is a wildcard that contains any of
// the deny clauses.
// FIXME(dlc) - We could be smarter and track when these go away and remove.
if allowed && c.mperms == nil && subjectHasWildcard(subject) {
// Whip through the deny array and check if this wildcard subject is within scope.
for _, sub := range c.darray {
tokens := strings.Split(sub, tsep)
if isSubsetMatch(tokens, subject) {
c.loadMsgDenyFilter()
break
}
}
}
}
return allowed
}
func queueMatches(queue string, qsubs [][]*subscription) bool {
if len(qsubs) == 0 {
return true
}
for _, qsub := range qsubs {
qs := qsub[0]
qname := string(qs.queue)
// NOTE: '*' and '>' tokens can also be valid
// queue names so we first check against the
// literal name. e.g. v1.* == v1.*
if queue == qname || (subjectHasWildcard(qname) && subjectIsSubsetMatch(queue, qname)) {
return true
}
}
return false
}
func (c *client) canQueueSubscribe(subject, queue string) bool {
if c.perms == nil {
return true
}
allowed := true
if c.perms.sub.allow != nil {
r := c.perms.sub.allow.Match(subject)
// If perms DO NOT have queue name, then psubs will be greater than
// zero. If perms DO have queue name, then qsubs will be greater than
// zero.
allowed = len(r.psubs) > 0
if len(r.qsubs) > 0 {
// If the queue appears in the allow list, then DO allow.
allowed = queueMatches(queue, r.qsubs)
}
}
if allowed && c.perms.sub.deny != nil {
r := c.perms.sub.deny.Match(subject)
// If perms DO NOT have queue name, then psubs will be greater than
// zero. If perms DO have queue name, then qsubs will be greater than
// zero.
allowed = len(r.psubs) == 0
if len(r.qsubs) > 0 {
// If the queue appears in the deny list, then DO NOT allow.
allowed = !queueMatches(queue, r.qsubs)
}
}
return allowed
}
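// Illustrative sketch (editor's example, assuming the space-separated
// "<subject> <queue>" permission form): an allow entry of "foo.* v1" is
// stored as a queue sub, so a match yields qsubs rather than psubs. Then
// canQueueSubscribe("foo.bar", "v1") is true, a plain subscribe to "foo.bar"
// is denied (psubs is empty), and queue "v2" fails queueMatches.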
// Low level unsubscribe for a given client.
func (c *client) unsubscribe(acc *Account, sub *subscription, force, remove bool) {
c.mu.Lock()
if !force && sub.max > 0 && sub.nm < sub.max {
c.Debugf(
"Deferring actual UNSUB(%s): %d max, %d received",
string(sub.subject), sub.max, sub.nm)
c.mu.Unlock()
return
}
if c.trace {
c.traceOp("<-> %s", "DELSUB", sub.sid)
}
if c.kind != CLIENT && c.kind != SYSTEM {
c.removeReplySubTimeout(sub)
}
// Remove accounting if requested. This will be false when we close a connection
// with open subscriptions.
if remove {
delete(c.subs, string(sub.sid))
if acc != nil {
acc.sl.Remove(sub)
}
}
// Check to see if we have shadow subscriptions.
var updateRoute bool
shadowSubs := sub.shadow
sub.shadow = nil
if len(shadowSubs) > 0 {
updateRoute = (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) && c.srv != nil
}
sub.close()
c.mu.Unlock()
// Process shadow subs if we have them.
for _, nsub := range shadowSubs {
if err := nsub.im.acc.sl.Remove(nsub); err != nil {
c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name)
} else if updateRoute {
c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1)
}
// Now check on leafnode updates.
c.srv.updateLeafNodes(nsub.im.acc, nsub, -1)
}
// Now check to see if this was part of a respMap entry for service imports.
if acc != nil {
acc.checkForRespEntry(string(sub.subject))
}
}
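// processUnsub parses an UNSUB protocol line of the form "<sid> [max-msgs]".
// Without a max the subscription is removed immediately; with a max, removal
// is deferred until that many messages have been delivered (see unsubscribe).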
func (c *client) processUnsub(arg []byte) error {
args := splitArg(arg)
var sid []byte
max := -1
switch len(args) {
case 1:
sid = args[0]
case 2:
sid = args[0]
max = parseSize(args[1])
default:
return fmt.Errorf("processUnsub Parse Error: '%s'", arg)
}
// Indicate activity.
c.in.subs++
var sub *subscription
var ok, unsub bool
c.mu.Lock()
// Grab connection type.
kind := c.kind
srv := c.srv
var acc *Account
updateGWs := false
if sub, ok = c.subs[string(sid)]; ok {
acc = c.acc
if max > 0 {
sub.max = int64(max)
} else {
// Clear it here to override
sub.max = 0
unsub = true
}
updateGWs = srv.gateway.enabled
}
c.mu.Unlock()
if c.opts.Verbose {
c.sendOK()
}
if unsub {
c.unsubscribe(acc, sub, false, true)
if acc != nil && (kind == CLIENT || kind == SYSTEM) {
srv.updateRouteSubscriptionMap(acc, sub, -1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, -1)
}
return nil
}
// checkDenySub will check if we are allowed to deliver this message in the
// presence of deny clauses for subscriptions. Deny clauses will not prevent
// larger scoped wildcard subscriptions, so we need to check at delivery time.
// Lock should be held.
func (c *client) checkDenySub(subject string) bool {
if denied, ok := c.mperms.dcache[subject]; ok {
return denied
} else if r := c.mperms.deny.Match(subject); len(r.psubs) != 0 {
c.mperms.dcache[subject] = true
return true
} else {
c.mperms.dcache[subject] = false
}
if len(c.mperms.dcache) > maxDenyPermCacheSize {
c.pruneDenyCache()
}
return false
}
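// Illustrative sketch (editor's example): with deny ["foo.bar"], a client may
// still hold the wider subscription "foo.*", so each delivery is checked here:
// the first checkDenySub("foo.baz") runs the sublist match and caches false,
// checkDenySub("foo.bar") caches true and that message is dropped, and later
// calls for either subject are answered straight from dcache.

// msgHeader completes a partially built "MSG <subject> " header by appending
// the sid, the optional reply, and the payload size followed by CRLF.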
func (c *client) msgHeader(mh []byte, sub *subscription, reply []byte) []byte {
if len(sub.sid) > 0 {
mh = append(mh, sub.sid...)
mh = append(mh, ' ')
}
if reply != nil {
mh = append(mh, reply...)
mh = append(mh, ' ')
}
mh = append(mh, c.pa.szb...)
mh = append(mh, _CRLF_...)
return mh
}
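// stalledWait parks a fast producer until the stalled consumer's stall
// channel is closed or the computed ttl elapses. The consumer's lock is
// released while waiting and re-acquired on return.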
func (c *client) stalledWait(producer *client) {
stall := c.out.stc
ttl := stallDuration(c.out.pb, c.out.mp)
c.mu.Unlock()
defer c.mu.Lock()
select {
case <-stall:
case <-time.After(ttl):
producer.Debugf("Timed out of fast producer stall (%v)", ttl)
}
}
func stallDuration(pb, mp int64) time.Duration {
ttl := stallClientMinDuration
if pb >= mp {
ttl = stallClientMaxDuration
} else if hmp := mp / 2; pb > hmp {
bsz := hmp / 10
additional := int64(ttl) * ((pb - hmp) / bsz)
ttl += time.Duration(additional)
}
return ttl
}
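// Worked example (editor's sketch, assuming stallClientMinDuration is 100ms
// and stallClientMaxDuration is 1s): with mp = 10MB and pb = 7.5MB we get
// hmp = 5MB and bsz = 500KB, so additional = 100ms * (2.5MB / 500KB) = 500ms
// and the returned ttl is 600ms. Once pb >= mp the full max duration is used.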
// Used to treat a map as an efficient set
var needFlush = struct{}{}
// deliverMsg will deliver a message to a matching subscription and its underlying client.
// We process all connection/client types. mh is the part that will be protocol/client specific.
func (c *client) deliverMsg(sub *subscription, subject, mh, msg []byte, gwrply bool) bool {
if sub.client == nil {
return false
}
client := sub.client
client.mu.Lock()
// Check echo
if c == client && !client.echo {
client.mu.Unlock()
return false
}
// Check if we have a subscribe deny clause. This will trigger us to check the subject
// for a match against the denied subjects.
if client.mperms != nil && client.checkDenySub(string(subject)) {
client.mu.Unlock()
return false
}
// This is set under the client lock using atomic because it can be
// checked with atomic without the client lock. Here, we don't need
// the atomic operation since we are under the lock.
if sub.closed == 1 {
client.mu.Unlock()
return false
}
srv := client.srv
sub.nm++
// Check if we should auto-unsubscribe.
if sub.max > 0 {
if client.kind == ROUTER && sub.nm >= sub.max {
// The only router based messages that we will see here are remoteReplies.
// We handle these slightly differently.
defer client.removeReplySub(sub)
} else {
// For routing..
shouldForward := (client.kind == CLIENT || client.kind == SYSTEM) && client.srv != nil
// If we are at the exact number, unsubscribe but
// still process the message in hand, otherwise
// unsubscribe and drop message on the floor.
if sub.nm == sub.max {
client.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'", sub.max, string(sub.sid))
// Due to defer, reverse the code order so that execution
// is consistent with other cases where we unsubscribe.
if shouldForward {
if srv.gateway.enabled {
defer srv.gatewayUpdateSubInterest(client.acc.Name, sub, -1)
}
defer srv.updateRouteSubscriptionMap(client.acc, sub, -1)
}
defer client.unsubscribe(client.acc, sub, true, true)
} else if sub.nm > sub.max {
client.Debugf("Auto-unsubscribe limit [%d] exceeded", sub.max)
client.mu.Unlock()
client.unsubscribe(client.acc, sub, true, true)
if shouldForward {
srv.updateRouteSubscriptionMap(client.acc, sub, -1)
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(client.acc.Name, sub, -1)
}
}
return false
}
}
}
// Update statistics
// The msg includes the CR_LF, so pull back out for accounting.
msgSize := int64(len(msg) - LEN_CR_LF)
// No atomic needed since accessed under client lock.
// Monitor is reading those also under client's lock.
client.outMsgs++
client.outBytes += msgSize
atomic.AddInt64(&srv.outMsgs, 1)
atomic.AddInt64(&srv.outBytes, msgSize)
// Check for internal subscription.
if client.kind == SYSTEM {
s := client.srv
client.mu.Unlock()
s.deliverInternalMsg(sub, c, subject, c.pa.reply, msg[:msgSize])
return true
}
// If we are a client and we detect that the consumer we are
// sending to is in a stalled state, go ahead and wait here
// with a limit.
if c.kind == CLIENT && client.out.stc != nil {
client.stalledWait(c)
}
// Check for closed connection
if client.isClosed() {
client.mu.Unlock()
return false
}
// Do a fast check here to see if we should be tracking this from a latency
// perspective. This will be for a request being received for an exported service.
// This needs to be from a non-client (otherwise tracking happens at requestor).
//
// Also this check captures if the original reply (c.pa.reply) is a GW routed
// reply (since it is known to be > minReplyLen). If that is the case, we need to
// track the binding between the routed reply and the reply set in the message
// header (which is c.pa.reply without the GNR routing prefix).
if client.kind == CLIENT && len(c.pa.reply) > minReplyLen {
if gwrply {
// Note we keep track "in" the destination client (`client`) but the
// routed reply subject is in `c.pa.reply`. Should that change, we
// would have to pass the "reply" in deliverMsg().
srv.trackGWReply(client, c.pa.reply)
}
// If we do not have a registered RTT, queue that up now.
if client.rtt == 0 {
client.sendRTTPingLocked()
}
// FIXME(dlc) - We may need to optimize this.
// We will have tagged this with a suffix ('.T') if we are tracking. This is
// needed for sampling. Not all will be tracked.
if c.kind != CLIENT && client.acc.IsExportServiceTracking(string(subject)) && isTrackedReply(c.pa.reply) {
client.trackRemoteReply(string(c.pa.reply))
}
}
// Queue to outbound buffer
client.queueOutbound(mh)
client.queueOutbound(msg)
client.out.pm++
// If we are tracking dynamic publish permissions that track reply subjects,
// do that accounting here. We only look at client.replies which will be non-nil.
if client.replies != nil && len(c.pa.reply) > 0 {
client.replies[string(c.pa.reply)] = &resp{time.Now(), 0}
if len(client.replies) > replyPermLimit {
client.pruneReplyPerms()
}
}
// Check outbound threshold and queue IO flush if needed.
// This is specifically looking at situations where we are getting behind and may want
// to intervene before this producer goes back to top of readloop. We are in the producer's
// readloop go routine at this point.
// FIXME(dlc) - We may call this a lot, maybe suppress after first call?
if client.out.pm > 1 && client.out.pb > maxBufSize*2 {
client.flushSignal()
}
// Add the data size we are responsible for here. This will be processed when we
// return to the top of the readLoop.
if _, ok := c.pcd[client]; !ok {
client.out.fsp++
c.pcd[client] = needFlush
}
if client.trace {
client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil)
}
client.mu.Unlock()
return true
}
// This will track a remote reply for an exported service that has requested
// latency tracking.
// Lock assumed to be held.
func (c *client) trackRemoteReply(reply string) {
if c.rrTracking == nil {
c.rrTracking = make(map[string]*remoteLatency)
c.rrMax = c.acc.MaxAutoExpireResponseMaps()
}
rl := remoteLatency{
Account: c.acc.Name,
ReqId: reply,
}
rl.M2.RequestStart = time.Now()
c.rrTracking[reply] = &rl
if len(c.rrTracking) >= c.rrMax {
c.pruneRemoteTracking()
}
}
// pruneReplyPerms will remove any stale or expired entries
// in our reply cache. We make sure to not check too often.
func (c *client) pruneReplyPerms() {
// Make sure we do not check too often.
if c.perms.resp == nil {
return
}
mm := c.perms.resp.MaxMsgs
ttl := c.perms.resp.Expires
now := time.Now()
for k, resp := range c.replies {
if mm > 0 && resp.n >= mm {
delete(c.replies, k)
} else if ttl > 0 && now.Sub(resp.t) > ttl {
delete(c.replies, k)
}
}
}
// pruneDenyCache will prune the deny cache via randomly
// deleting items. Doing so pruneSize items at a time.
// Lock must be held for this one since it is shared under
// deliverMsg.
func (c *client) pruneDenyCache() {
r := 0
for subject := range c.mperms.dcache {
delete(c.mperms.dcache, subject)
if r++; r > pruneSize {
break
}
}
}
// prunePubPermsCache will prune the cache via randomly
// deleting items. Doing so pruneSize items at a time.
func (c *client) prunePubPermsCache() {
r := 0
for subject := range c.perms.pcache {
delete(c.perms.pcache, subject)
if r++; r > pruneSize {
break
}
}
}
// pruneRemoteTracking will prune any remote tracking objects
// that are too old. These are orphaned when a service is not
// sending responses etc.
// Lock should be held upon entry.
func (c *client) pruneRemoteTracking() {
ttl := c.acc.AutoExpireTTL()
now := time.Now()
for reply, rl := range c.rrTracking {
if now.Sub(rl.M2.RequestStart) > ttl {
delete(c.rrTracking, reply)
}
}
}
// pubAllowed checks on publish permissioning.
// Lock should not be held.
func (c *client) pubAllowed(subject string) bool {
return c.pubAllowedFullCheck(subject, true)
}
// pubAllowedFullCheck checks on all publish permissioning depending
// on the flag for dynamic reply permissions.
func (c *client) pubAllowedFullCheck(subject string, fullCheck bool) bool {
if c.perms == nil || (c.perms.pub.allow == nil && c.perms.pub.deny == nil) {
return true
}
// Check if published subject is allowed if we have permissions in place.
allowed, ok := c.perms.pcache[subject]
if ok {
return allowed
}
// Cache miss, check allow then deny as needed.
if c.perms.pub.allow != nil {
r := c.perms.pub.allow.Match(subject)
allowed = len(r.psubs) != 0
} else {
// No entries means all are allowed. Deny will overrule as needed.
allowed = true
}
// If we have a deny list and are currently allowed, check that as well.
if allowed && c.perms.pub.deny != nil {
r := c.perms.pub.deny.Match(subject)
allowed = len(r.psubs) == 0
}
// If we are currently not allowed but we are tracking reply subjects
// dynamically, check to see if we are allowed here but avoid pcache.
// We need to acquire the lock though.
if !allowed && fullCheck && c.perms.resp != nil {
c.mu.Lock()
if resp := c.replies[subject]; resp != nil {
resp.n++
// Check if we have sent too many responses.
if c.perms.resp.MaxMsgs > 0 && resp.n > c.perms.resp.MaxMsgs {
delete(c.replies, subject)
} else if c.perms.resp.Expires > 0 && time.Since(resp.t) > c.perms.resp.Expires {
delete(c.replies, subject)
} else {
allowed = true
}
}
c.mu.Unlock()
} else {
// Update our cache here.
c.perms.pcache[subject] = allowed
// Prune if needed.
if len(c.perms.pcache) > maxPermCacheSize {
c.prunePubPermsCache()
}
}
return allowed
}
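// Illustrative sketch (editor's example): with allow ["req.*"] and dynamic
// response permissions enabled, pubAllowed("req.a") is answered from pcache
// after the first sublist match, while a publish to a tracked reply subject
// misses the allow list but is granted via c.replies and deliberately never
// cached, since that grant expires with the reply entry.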
// Test whether a reply subject is a service import reply.
func isServiceReply(reply []byte) bool {
// This function is inlined and checking this way is actually faster
// than byte-by-byte comparison.
return len(reply) > 3 && string(reply[:4]) == replyPrefix
}
// Test whether a reply subject is a service import or a gateway routed reply.
func isReservedReply(reply []byte) bool {
if isServiceReply(reply) {
return true
}
// Faster to check with string([:]) than byte-by-byte
if len(reply) > gwReplyPrefixLen && string(reply[:gwReplyPrefixLen]) == gwReplyPrefix {
return true
}
return false
}
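// Illustrative sketch (editor's example, assuming the upstream prefix values
// replyPrefix = "_R_." and gwReplyPrefix = "$GNR."): a publish carrying a
// reply like "_R_.abc123" or one beginning with "$GNR." is rejected as
// reserved, while an ordinary "_INBOX.xyz" reply passes both checks.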
// This will decide to call the client code or router code.
func (c *client) processInboundMsg(msg []byte) {
switch c.kind {
case CLIENT:
c.processInboundClientMsg(msg)
case ROUTER:
c.processInboundRoutedMsg(msg)
case GATEWAY:
c.processInboundGatewayMsg(msg)
case LEAF:
c.processInboundLeafMsg(msg)
}
}
// processInboundClientMsg is called to process an inbound msg from a client.
func (c *client) processInboundClientMsg(msg []byte) {
// Update statistics
// The msg includes the CR_LF, so pull back out for accounting.
c.in.msgs++
c.in.bytes += int32(len(msg) - LEN_CR_LF)
// Check that client (could be here with SYSTEM) is not publishing on reserved "$GNR" prefix.
if c.kind == CLIENT && hasGWRoutedReplyPrefix(c.pa.subject) {
c.pubPermissionViolation(c.pa.subject)
return
}
// Check pub permissions
if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) {
c.pubPermissionViolation(c.pa.subject)
return
}
// Now check for reserved replies. These are used for service imports.
if len(c.pa.reply) > 0 && isReservedReply(c.pa.reply) {
c.replySubjectViolation(c.pa.reply)
return
}
if c.opts.Verbose {
c.sendOK()
}
// Mostly under testing scenarios.
if c.srv == nil || c.acc == nil {
return
}
// Check if this client's gateway replies map is not empty
if atomic.LoadInt32(&c.cgwrt) > 0 && c.handleGWReplyMap(msg) {
return
}
// Check to see if we need to map/route to another account.
if c.acc.imports.services != nil {
c.checkForImportServices(c.acc, msg)
}
// If we have an exported service and we are doing remote tracking, check this subject
// to see if we need to report the latency.
if c.rrTracking != nil {
c.mu.Lock()
rl := c.rrTracking[string(c.pa.subject)]
if rl != nil {
delete(c.rrTracking, string(c.pa.subject))
}
rtt := c.rtt
c.mu.Unlock()
if rl != nil {
sl := &rl.M2
// Fill this in and send it off to the other side.
sl.AppName = c.opts.Name
sl.ServiceLatency = time.Since(sl.RequestStart) - rtt
sl.NATSLatency.Responder = rtt
sl.TotalLatency = sl.ServiceLatency + rtt
sanitizeLatencyMetric(sl)
lsub := remoteLatencySubjectForResponse(c.pa.subject)
c.srv.sendInternalAccountMsg(nil, lsub, &rl) // Send to SYS account
}
}
// Match the subscriptions. We will use our own L1 map if
// it's still valid, avoiding contention on the shared sublist.
var r *SublistResult
var ok bool
genid := atomic.LoadUint64(&c.acc.sl.genid)
if genid == c.in.genid && c.in.results != nil {
r, ok = c.in.results[string(c.pa.subject)]
} else {
// Reset our L1 completely.
c.in.results = make(map[string]*SublistResult)
c.in.genid = genid
}
// Go back to the sublist data structure.
if !ok {
r = c.acc.sl.Match(string(c.pa.subject))
c.in.results[string(c.pa.subject)] = r
// Prune the results cache. Keeps us from unbounded growth. Random delete.
if len(c.in.results) > maxResultCacheSize {
n := 0
for subject := range c.in.results {
delete(c.in.results, subject)
if n++; n > pruneSize {
break
}
}
}
}
var qnames [][]byte
// Check for no interest, short circuit if so.
// This is the fanout scale.
if len(r.psubs)+len(r.qsubs) > 0 {
flag := pmrNoFlag
// If there are matching queue subs and we are in gateway mode,
// we need to keep track of the queue names the messages are
// delivered to. When sending to the GWs, the RMSG will include
// those names so that the remote clusters do not deliver messages
// to their queue subs of the same names.
if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
flag |= pmrCollectQueueNames
}
qnames = c.processMsgResults(c.acc, r, msg, c.pa.subject, c.pa.reply, flag)
}
// Now deal with gateways
if c.srv.gateway.enabled {
c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, qnames)
}
}
// This is invoked knowing that this client has some GW replies
// in its map. It will check if one is found for the c.pa.subject
// and if so will process it directly (send to GWs and LEAF) and
// return true to notify the caller that the message was handled.
// If there is no mapping for the subject, false is returned.
func (c *client) handleGWReplyMap(msg []byte) bool {
c.mu.Lock()
rm, ok := c.gwrm[string(c.pa.subject)]
if !ok {
c.mu.Unlock()
return false
}
// Set subject to the mapped reply subject
c.pa.subject = []byte(rm.ms)
var rl *remoteLatency
var rtt time.Duration
if c.rrTracking != nil {
rl = c.rrTracking[string(c.pa.subject)]
if rl != nil {
delete(c.rrTracking, string(c.pa.subject))
}
rtt = c.rtt
}
c.mu.Unlock()
if rl != nil {
sl := &rl.M2
// Fill this in and send it off to the other side.
sl.AppName = c.opts.Name
sl.ServiceLatency = time.Since(sl.RequestStart) - rtt
sl.NATSLatency.Responder = rtt
sl.TotalLatency = sl.ServiceLatency + rtt
sanitizeLatencyMetric(sl)
lsub := remoteLatencySubjectForResponse(c.pa.subject)
c.srv.sendInternalAccountMsg(nil, lsub, &rl) // Send to SYS account
}
// Check for leaf nodes
if c.srv.gwLeafSubs.Count() > 0 {
if r := c.srv.gwLeafSubs.Match(string(c.pa.subject)); len(r.psubs) > 0 {
c.processMsgResults(c.acc, r, msg, c.pa.subject, c.pa.reply, pmrNoFlag)
}
}
if c.srv.gateway.enabled {
c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, nil)
}
return true
}
// This checks and process import services by doing the mapping and sending the
// message onward if applicable.
func (c *client) checkForImportServices(acc *Account, msg []byte) {
if acc == nil || acc.imports.services == nil {
return
}
acc.mu.RLock()
si := acc.imports.services[string(c.pa.subject)]
invalid := si != nil && si.invalid
acc.mu.RUnlock()
// Get the results from the other account for the mapped "to" subject.
// If we have been marked invalid simply return here.
if si != nil && !invalid && si.acc != nil && si.acc.sl != nil {
var nrr []byte
if c.pa.reply != nil {
var latency *serviceLatency
var tracking bool
if tracking = shouldSample(si.latency); tracking {
latency = si.latency
}
// We want to remap this to provide anonymity.
nrr = si.acc.newServiceReply(tracking)
si.acc.addRespServiceImport(acc, string(nrr), string(c.pa.reply), si.rt, latency)
// Track our responses for cleanup if not auto-expire.
if si.rt != Singleton {
acc.addRespMapEntry(si.acc, string(c.pa.reply), string(nrr))
} else if si.latency != nil && c.rtt == 0 {
// We have a service import that we are tracking but have not established RTT.
c.sendRTTPing()
}
}
// FIXME(dlc) - Do L1 cache trick from above.
rr := si.acc.sl.Match(si.to)
// Check to see if we have no results and this is an internal serviceImport. If so we
// need to clean that up.
if len(rr.psubs)+len(rr.qsubs) == 0 && si.internal {
// We may also have a response entry, so go through that way.
si.acc.checkForRespEntry(si.to)
}
flags := pmrNoFlag
// If we are a route or gateway or leafnode and this message is flipped to a queue subscriber we
// need to handle that since the processMsgResults will want a queue filter.
if c.kind == GATEWAY || c.kind == ROUTER || c.kind == LEAF {
flags |= pmrIgnoreEmptyQueueFilter
}
if c.srv.gateway.enabled {
flags |= pmrCollectQueueNames
queues := c.processMsgResults(si.acc, rr, msg, []byte(si.to), nrr, flags)
c.sendMsgToGateways(si.acc, msg, []byte(si.to), nrr, queues)
} else {
c.processMsgResults(si.acc, rr, msg, []byte(si.to), nrr, flags)
}
shouldRemove := si.ae
// Calculate tracking info here if we are tracking this request/response.
if si.tracking {
if requesting := firstSubFromResult(rr); requesting != nil {
shouldRemove = acc.sendTrackingLatency(si, requesting.client, c)
}
}
if shouldRemove {
acc.removeServiceImport(si.from)
}
}
}
func (c *client) addSubToRouteTargets(sub *subscription) {
if c.in.rts == nil {
c.in.rts = make([]routeTarget, 0, routeTargetInit)
}
for i := range c.in.rts {
rt := &c.in.rts[i]
if rt.sub.client == sub.client {
if sub.queue != nil {
rt.qs = append(rt.qs, sub.queue...)
rt.qs = append(rt.qs, ' ')
}
return
}
}
var rt *routeTarget
lrts := len(c.in.rts)
// If we are here we do not have the sub yet in our list
// If we have to grow do so here.
if lrts == cap(c.in.rts) {
c.in.rts = append(c.in.rts, routeTarget{})
}
c.in.rts = c.in.rts[:lrts+1]
rt = &c.in.rts[lrts]
rt.sub = sub
rt.qs = rt._qs[:0]
if sub.queue != nil {
rt.qs = append(rt.qs, sub.queue...)
rt.qs = append(rt.qs, ' ')
}
}
// This processes the sublist results for a given message.
func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, subject, reply []byte, flags int) [][]byte {
var queues [][]byte
// msg header for clients.
msgh := c.msgb[1:msgHeadProtoLen]
msgh = append(msgh, subject...)
msgh = append(msgh, ' ')
si := len(msgh)
// For sending messages across routes and leafnodes.
// Reset if we have one since we reuse this data structure.
if c.in.rts != nil {
c.in.rts = c.in.rts[:0]
}
var rplyHasGWPrefix bool
var creply = reply
// If the reply subject is a GW routed reply, we will perform some
// tracking in deliverMsg(). We also want to send to the user the
// reply without the prefix. `creply` will be set to that and be
// used to create the message header for client connections.
if rplyHasGWPrefix = isGWRoutedReply(reply); rplyHasGWPrefix {
creply = reply[gwSubjectOffset:]
}
// Loop over all normal subscriptions that match.
for _, sub := range r.psubs {
// Check if this is a send to a ROUTER. We now process
// these after everything else.
switch sub.client.kind {
case ROUTER:
if (c.kind != ROUTER && !c.isSpokeLeafNode()) || (flags&pmrAllowSendFromRouteToRoute != 0) {
c.addSubToRouteTargets(sub)
}
continue
case GATEWAY:
// Never send to gateway from here.
continue
case LEAF:
// We handle similarly to routes and use the same data structures.
// Leaf node delivery audience is different however.
// Also leaf nodes are always no echo, so we make sure we are not
// going to send back to ourselves here.
if c != sub.client && (c.kind != ROUTER || !c.isSpokeLeafNode()) {
c.addSubToRouteTargets(sub)
}
continue
}
// Check for stream import mapped subs. These apply to local subs only.
if sub.im != nil && sub.im.prefix != "" {
// Redo the subject here on the fly.
msgh = c.msgb[1:msgHeadProtoLen]
msgh = append(msgh, sub.im.prefix...)
msgh = append(msgh, subject...)
msgh = append(msgh, ' ')
si = len(msgh)
}
// Normal delivery
mh := c.msgHeader(msgh[:si], sub, creply)
c.deliverMsg(sub, subject, mh, msg, rplyHasGWPrefix)
}
// Set these up to optionally filter based on the queue lists.
// This is for messages received from routes which will have directed
// guidance on which queue groups we should deliver to.
qf := c.pa.queues
// For all non-client connections, we may still want to send messages to
// leaf nodes or routes even if there are no queue filters since we collect
// them above and do not process inline like normal clients.
// However, do select queue subs if asked to ignore empty queue filter.
if c.kind != CLIENT && qf == nil && flags&pmrIgnoreEmptyQueueFilter == 0 {
goto sendToRoutesOrLeafs
}
// Check to see if we have our own rand yet. Global rand
// has contention with lots of clients, etc.
if c.in.prand == nil {
c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// Process queue subs
for i := 0; i < len(r.qsubs); i++ {
qsubs := r.qsubs[i]
// If we have a filter check that here. We could make this a map or something more
// complex but linear search since we expect queues to be small. Should be faster
// and more cache friendly.
if qf != nil && len(qsubs) > 0 {
tqn := qsubs[0].queue
for _, qn := range qf {
if bytes.Equal(qn, tqn) {
goto selectQSub
}
}
continue
}
selectQSub:
// We will hold onto remote or leaf qsubs when we are coming from
// a route or a leaf node just in case we can no longer do local delivery.
var rsub, sub *subscription
var _ql [32]*subscription
src := c.kind
// If we just came from a route we want to prefer local subs.
// So only select from local subs but remember the first rsub
// in case all else fails.
if src == ROUTER {
ql := _ql[:0]
for i := 0; i < len(qsubs); i++ {
sub = qsubs[i]
if sub.client.kind == CLIENT {
ql = append(ql, sub)
} else if rsub == nil {
rsub = sub
}
}
qsubs = ql
}
sindex := 0
lqs := len(qsubs)
if lqs > 1 {
sindex = c.in.prand.Int() % lqs
}
// Find a subscription that is able to deliver this message starting at a random index.
for i := 0; i < lqs; i++ {
if sindex+i < lqs {
sub = qsubs[sindex+i]
} else {
sub = qsubs[(sindex+i)%lqs]
}
if sub == nil {
continue
}
// We have taken care of preferring local subs for a message from a route above.
// Here we only need to handle client and leaf destinations, skipping a leaf
// (kept as a fallback) so that local subs are preferred.
if dst := sub.client.kind; dst == ROUTER || dst == LEAF {
if (src == LEAF || src == CLIENT) && dst == LEAF {
if rsub == nil {
rsub = sub
}
continue
} else {
c.addSubToRouteTargets(sub)
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, sub.queue)
}
}
break
}
// Check for mapped subs
if sub.im != nil && sub.im.prefix != "" {
// Redo the subject here on the fly.
msgh = c.msgb[1:msgHeadProtoLen]
msgh = append(msgh, sub.im.prefix...)
msgh = append(msgh, subject...)
msgh = append(msgh, ' ')
si = len(msgh)
}
var rreply = reply
if rplyHasGWPrefix && sub.client.kind == CLIENT {
rreply = creply
}
// "rreply" will be stripped of the $GNR prefix (if present)
// for client connections only.
mh := c.msgHeader(msgh[:si], sub, rreply)
if c.deliverMsg(sub, subject, mh, msg, rplyHasGWPrefix) {
// Clear rsub
rsub = nil
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, sub.queue)
}
break
}
}
if rsub != nil {
// If we are here we tried to deliver to a local qsub
// but failed. So we will send it to a remote or leaf node.
c.addSubToRouteTargets(rsub)
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, rsub.queue)
}
}
}
sendToRoutesOrLeafs:
// If no messages for routes or leafnodes return here.
if len(c.in.rts) == 0 {
return queues
}
// We address by index to avoid struct copy.
// We have inline structs for memory layout and cache coherency.
for i := range c.in.rts {
rt := &c.in.rts[i]
kind := rt.sub.client.kind
mh := c.msgb[:msgHeadProtoLen]
if kind == ROUTER {
// Router (and Gateway) nodes are RMSG. Set here since leafnodes may rewrite.
mh[0] = 'R'
mh = append(mh, acc.Name...)
mh = append(mh, ' ')
} else {
// Leaf nodes are LMSG
mh[0] = 'L'
// Remap subject if its a shadow subscription, treat like a normal client.
if rt.sub.im != nil && rt.sub.im.prefix != "" {
mh = append(mh, rt.sub.im.prefix...)
}
}
mh = append(mh, subject...)
mh = append(mh, ' ')
if len(rt.qs) > 0 {
if reply != nil {
mh = append(mh, "+ "...) // Signal that there is a reply.
mh = append(mh, reply...)
mh = append(mh, ' ')
} else {
mh = append(mh, "| "...) // Only queues
}
mh = append(mh, rt.qs...)
} else if reply != nil {
mh = append(mh, reply...)
mh = append(mh, ' ')
}
mh = append(mh, c.pa.szb...)
mh = append(mh, _CRLF_...)
c.deliverMsg(rt.sub, subject, mh, msg, false)
}
return queues
}
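// Illustrative framing sketch (editor's example, assuming c.msgb is primed
// with the "RMSG " head so that c.msgb[1:] starts with "MSG "): a routed
// message on subject "foo" in account "ACC" with reply "bar" and a matched
// queue "q1" is framed as "RMSG ACC foo + bar q1 <size>\r\n"; without a
// reply it becomes "RMSG ACC foo | q1 <size>\r\n", a leaf node gets "LMSG"
// instead of "RMSG", and a plain client sees "MSG foo <sid> bar <size>\r\n".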
func (c *client) pubPermissionViolation(subject []byte) {
c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject))
c.Errorf("Publish Violation - %s, Subject %q", c.getAuthUser(), subject)
}
func (c *client) subPermissionViolation(sub *subscription) {
errTxt := fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject)
logTxt := fmt.Sprintf("Subscription Violation - %s, Subject %q, SID %s",
c.getAuthUser(), sub.subject, sub.sid)
if sub.queue != nil {
errTxt = fmt.Sprintf("Permissions Violation for Subscription to %q using queue %q", sub.subject, sub.queue)
logTxt = fmt.Sprintf("Subscription Violation - %s, Subject %q, Queue: %q, SID %s",
c.getAuthUser(), sub.subject, sub.queue, sub.sid)
}
c.sendErr(errTxt)
c.Errorf(logTxt)
}
func (c *client) replySubjectViolation(reply []byte) {
c.sendErr(fmt.Sprintf("Permissions Violation for Publish with Reply of %q", reply))
c.Errorf("Publish Violation - %s, Reply %q", c.getAuthUser(), reply)
}
func (c *client) processPingTimer() {
c.mu.Lock()
c.ping.tmr = nil
// Check if connection is still opened
if c.isClosed() {
c.mu.Unlock()
return
}
c.Debugf("%s Ping Timer", c.typeString())
// If we have had activity within the PingInterval then
// there is no need to send a ping. This can be client data
// or if we received a ping from the other side.
pingInterval := c.srv.getOpts().PingInterval
now := time.Now()
needRTT := c.rtt == 0 || now.Sub(c.rttStart) > DEFAULT_RTT_MEASUREMENT_INTERVAL
if delta := now.Sub(c.last); delta < pingInterval && !needRTT {
c.Debugf("Delaying PING due to client activity %v ago", delta.Round(time.Second))
} else if delta := now.Sub(c.ping.last); delta < pingInterval && !needRTT {
c.Debugf("Delaying PING due to remote ping %v ago", delta.Round(time.Second))
} else {
// Check for violation
if c.ping.out+1 > c.srv.getOpts().MaxPingsOut {
c.Debugf("Stale Client Connection - Closing")
c.enqueueProto([]byte(fmt.Sprintf(errProto, "Stale Connection")))
c.mu.Unlock()
c.closeConnection(StaleConnection)
return
}
// Send PING
c.sendPing()
}
// Reset to fire again.
c.setPingTimer()
c.mu.Unlock()
}
// Lock should be held
func (c *client) setPingTimer() {
if c.srv == nil {
return
}
d := c.srv.getOpts().PingInterval
c.ping.tmr = time.AfterFunc(d, c.processPingTimer)
}
// Lock should be held
func (c *client) clearPingTimer() {
if c.ping.tmr == nil {
return
}
c.ping.tmr.Stop()
c.ping.tmr = nil
}
// Lock should be held
func (c *client) setAuthTimer(d time.Duration) {
c.atmr = time.AfterFunc(d, c.authTimeout)
}
// Lock should be held
func (c *client) clearAuthTimer() bool {
if c.atmr == nil {
return true
}
stopped := c.atmr.Stop()
c.atmr = nil
return stopped
}
// We may reuse atmr for expiring user jwts,
// so check connectReceived.
// Lock assume held on entry.
func (c *client) awaitingAuth() bool {
return !c.flags.isSet(connectReceived) && c.atmr != nil
}
// This will set the atmr for the JWT expiration time.
// We will lock on entry.
func (c *client) setExpirationTimer(d time.Duration) {
c.mu.Lock()
c.atmr = time.AfterFunc(d, c.authExpired)
c.mu.Unlock()
}
// Possibly flush the connection and then close the low level connection.
// The boolean `minimalFlush` indicates if the flush operation should have a
// minimal write deadline.
// Lock is held on entry.
func (c *client) flushAndClose(minimalFlush bool) {
if !c.flags.isSet(skipFlushOnClose) && c.out.pb > 0 {
if minimalFlush {
const lowWriteDeadline = 100 * time.Millisecond
// Reduce the write deadline if needed.
if c.out.wdl > lowWriteDeadline {
c.out.wdl = lowWriteDeadline
}
}
c.flushOutbound()
}
c.out.p, c.out.s = nil, nil
// Close the low level connection. WriteDeadline need to be set
// in case this is a TLS connection.
if c.nc != nil {
c.nc.SetWriteDeadline(time.Now().Add(100 * time.Millisecond))
c.nc.Close()
}
}
func (c *client) typeString() string {
switch c.kind {
case CLIENT:
return "Client"
case ROUTER:
return "Router"
case GATEWAY:
return "Gateway"
case LEAF:
return "LeafNode"
}
return "Unknown Type"
}
// processSubsOnConfigReload removes any subscriptions the client has that are no
// longer authorized, and checks for imports (accounts) due to a config reload.
func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) {
c.mu.Lock()
var (
checkPerms = c.perms != nil
checkAcc = c.acc != nil
acc = c.acc
)
if !checkPerms && !checkAcc {
c.mu.Unlock()
return
}
var (
_subs [32]*subscription
subs = _subs[:0]
_removed [32]*subscription
removed = _removed[:0]
srv = c.srv
)
if checkAcc {
// We actually only want to check if stream imports have changed.
if _, ok := awcsti[acc.Name]; !ok {
checkAcc = false
}
}
// We will clear any mperms we have here. It will rebuild on the fly with canSubscribe,
// so we do that here as we collect them. We will check result down below.
c.mperms = nil
// Collect client's subs under the lock
for _, sub := range c.subs {
// Checking canSubscribe here also rebuilds mperms under the lock; we collect
// removed subs here as well. Only add to the subs array when the sub is still
// allowed and checkAcc is true.
canSub := c.canSubscribe(string(sub.subject))
canQSub := sub.queue != nil && c.canQueueSubscribe(string(sub.subject), string(sub.queue))
if !canSub && !canQSub {
removed = append(removed, sub)
} else if checkAcc {
subs = append(subs, sub)
}
}
c.mu.Unlock()
// This list is all subs who are allowed and we need to check accounts.
for _, sub := range subs {
c.mu.Lock()
oldShadows := sub.shadow
sub.shadow = nil
c.mu.Unlock()
c.addShadowSubscriptions(acc, sub)
for _, nsub := range oldShadows {
nsub.im.acc.sl.Remove(nsub)
}
}
// Unsubscribe all that need to be removed and report back to client and logs.
for _, sub := range removed {
c.unsubscribe(acc, sub, true, true)
c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q (sid %q)",
sub.subject, sub.sid))
srv.Noticef("Removed sub %q (sid %q) for %s - not authorized",
sub.subject, sub.sid, c.getAuthUser())
}
}
// Allows us to count up all the queue subscribers during close.
type qsub struct {
sub *subscription
n int32
}
func (c *client) closeConnection(reason ClosedState) {
c.mu.Lock()
if c.nc == nil || c.flags.isSet(closeConnection) {
c.mu.Unlock()
return
}
// This will set the closeConnection flag and save the connection, etc..
// Will return true if no writeLoop was started and TCP connection was
// closed in place, in which case we need to do the teardown.
teardownNow := c.markConnAsClosed(reason, false)
c.mu.Unlock()
if teardownNow {
c.teardownConn()
}
}
// Clear the state of this connection and remove it from the server.
// If the connection was initiated (such as ROUTE, GATEWAY, etc..) this may trigger
// a reconnect. This function MUST be called only once per connection. It normally
// happens when the writeLoop returns, or in closeConnection() if no writeLoop has
// been started.
func (c *client) teardownConn() {
c.mu.Lock()
// Be consistent with the creation: for routes and gateways,
// we use Noticef on create, so use that too for delete.
if c.kind == ROUTER || c.kind == GATEWAY {
c.Noticef("%s connection closed", c.typeString())
} else { // Client and Leaf Node connections.
c.Debugf("%s connection closed", c.typeString())
}
c.clearAuthTimer()
c.clearPingTimer()
// Unblock anyone who is potentially stalled waiting on us.
if c.out.stc != nil {
close(c.out.stc)
c.out.stc = nil
}
c.nc = nil
var (
retryImplicit bool
connectURLs []string
gwName string
gwIsOutbound bool
gwCfg *gatewayCfg
kind = c.kind
srv = c.srv
noReconnect = c.flags.isSet(noReconnect)
acc = c.acc
)
// Snapshot for use if we are a client connection.
// FIXME(dlc) - we can just stub in a new one for client
// and reference existing one.
var subs []*subscription
if kind == CLIENT || kind == LEAF {
var _subs [32]*subscription
subs = _subs[:0]
for _, sub := range c.subs {
// Auto-unsubscribe subscriptions must be unsubscribed forcibly.
sub.max = 0
sub.close()
subs = append(subs, sub)
}
}
if c.route != nil {
if !noReconnect {
retryImplicit = c.route.retry
}
connectURLs = c.route.connectURLs
}
if kind == GATEWAY {
gwName = c.gw.name
gwIsOutbound = c.gw.outbound
gwCfg = c.gw.cfg
}
c.mu.Unlock()
// Remove client's or leaf node subscriptions.
if (kind == CLIENT || kind == LEAF) && acc != nil {
acc.sl.RemoveBatch(subs)
} else if kind == ROUTER {
go c.removeRemoteSubs()
}
if srv != nil {
// This is a route that disconnected, but we are not in lame duck mode...
if len(connectURLs) > 0 && !srv.isLameDuckMode() {
// Unless disabled, possibly update the server's INFO protocol
// and send to clients that know how to handle async INFOs.
if !srv.getOpts().Cluster.NoAdvertise {
srv.removeClientConnectURLsAndSendINFOToClients(connectURLs)
}
}
// Unregister
srv.removeClient(c)
// Update remote subscriptions.
if acc != nil && (kind == CLIENT || kind == LEAF) {
qsubs := map[string]*qsub{}
for _, sub := range subs {
// Call unsubscribe here to cleanup shadow subscriptions and such.
c.unsubscribe(acc, sub, true, false)
// Update route as normal for a normal subscriber.
if sub.queue == nil {
srv.updateRouteSubscriptionMap(acc, sub, -1)
} else {
// We handle queue subscribers special in case we
// have a bunch we can just send one update to the
// connected routes.
key := string(sub.subject) + " " + string(sub.queue)
if esub, ok := qsubs[key]; ok {
esub.n++
} else {
qsubs[key] = &qsub{sub, 1}
}
}
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, -1)
}
// Process any qsubs here.
for _, esub := range qsubs {
srv.updateRouteSubscriptionMap(acc, esub.sub, -(esub.n))
srv.updateLeafNodes(acc, esub.sub, -(esub.n))
}
if prev := acc.removeClient(c); prev == 1 && srv != nil {
srv.decActiveAccounts()
}
}
}
// Don't reconnect connections that have been marked with
// the no reconnect flag.
if noReconnect {
return
}
// Check for a solicited route. If it was, start up a reconnect unless
// we are already connected to the other end.
if c.isSolicitedRoute() || retryImplicit {
// Capture these under lock
c.mu.Lock()
rid := c.route.remoteID
rtype := c.route.routeType
rurl := c.route.url
c.mu.Unlock()
srv.mu.Lock()
defer srv.mu.Unlock()
// It is possible that the server is being shutdown.
// If so, don't try to reconnect
if !srv.running {
return
}
if rid != "" && srv.remotes[rid] != nil {
srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid)
return
} else if rid == srv.info.ID {
srv.Debugf("Detected route to self, ignoring \"%s\"", rurl)
return
} else if rtype != Implicit || retryImplicit {
srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl)
// Keep track of this go-routine so we can wait for it on
// server shutdown.
srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) })
}
} else if srv != nil && kind == GATEWAY && gwIsOutbound {
if gwCfg != nil {
srv.Debugf("Attempting reconnect for gateway %q", gwName)
// Run this as a go routine since we may be called within
// the solicitGateway itself if there was an error during
// the creation of the gateway connection.
srv.startGoRoutine(func() { srv.reconnectGateway(gwCfg) })
} else {
srv.Debugf("Gateway %q not in configuration, not attempting reconnect", gwName)
}
} else if c.isSolicitedLeafNode() {
// Check if this is a solicited leaf node. Start up a reconnect.
srv.startGoRoutine(func() { srv.reConnectToRemoteLeafNode(c.leaf.remote) })
}
}
// Set the noReconnect flag. This is used before a call to closeConnection()
// to prevent the connection to reconnect (routes, gateways).
func (c *client) setNoReconnect() {
c.mu.Lock()
c.flags.set(noReconnect)
c.mu.Unlock()
}
// Returns the client's RTT value with the protection of the client's lock.
func (c *client) getRTTValue() time.Duration {
c.mu.Lock()
rtt := c.rtt
c.mu.Unlock()
return rtt
}
// This function is used by ROUTER and GATEWAY connections to
// look for a subject on a given account (since these type of
// connections are not bound to a specific account).
// If the c.pa.subject is found in the cache, the cached result
// is returned, otherwise, we match the account's sublist and update
// the cache. The cache is pruned if reaching a certain size.
func (c *client) getAccAndResultFromCache() (*Account, *SublistResult) {
var (
acc *Account
pac *perAccountCache
r *SublistResult
ok bool
)
// Check our cache.
if pac, ok = c.in.pacache[string(c.pa.pacache)]; ok {
// Check the genid to see if it's still valid.
if genid := atomic.LoadUint64(&pac.acc.sl.genid); genid != pac.genid {
ok = false
delete(c.in.pacache, string(c.pa.pacache))
} else {
acc = pac.acc
r = pac.results
}
}
if !ok {
// Match correct account and sublist.
if acc, _ = c.srv.LookupAccount(string(c.pa.account)); acc == nil {
return nil, nil
}
// Match against the account sublist.
r = acc.sl.Match(string(c.pa.subject))
// Store in our cache
c.in.pacache[string(c.pa.pacache)] = &perAccountCache{acc, r, atomic.LoadUint64(&acc.sl.genid)}
// Check if we need to prune.
if len(c.in.pacache) > maxPerAccountCacheSize {
c.prunePerAccountCache()
}
}
return acc, r
}
// Account will return the associated account for this client.
func (c *client) Account() *Account {
if c == nil {
return nil
}
c.mu.Lock()
defer c.mu.Unlock()
return c.acc
}
// prunePerAccountCache will prune off a random number of cache entries.
func (c *client) prunePerAccountCache() {
n := 0
for cacheKey := range c.in.pacache {
delete(c.in.pacache, cacheKey)
if n++; n > prunePerAccountCacheSize {
break
}
}
}
// pruneClosedSubFromPerAccountCache removes entries that contain subscriptions
// that have been closed.
func (c *client) pruneClosedSubFromPerAccountCache() {
for cacheKey, pac := range c.in.pacache {
for _, sub := range pac.results.psubs {
if sub.isClosed() {
goto REMOVE
}
}
for _, qsub := range pac.results.qsubs {
for _, sub := range qsub {
if sub.isClosed() {
goto REMOVE
}
}
}
continue
REMOVE:
delete(c.in.pacache, cacheKey)
}
}
// getAuthUser returns the auth user for the client.
func (c *client) getAuthUser() string {
switch {
case c.opts.Nkey != "":
return fmt.Sprintf("Nkey %q", c.opts.Nkey)
case c.opts.Username != "":
return fmt.Sprintf("User %q", c.opts.Username)
default:
return `User "N/A"`
}
}
// isClosed returns true if the closeConnection flag has been set,
// or if `nc` is nil, which may happen in tests.
func (c *client) isClosed() bool {
return c.flags.isSet(closeConnection) || c.nc == nil
}
// Logging functionality scoped to a client or route.
func (c *client) Error(err error) {
c.srv.Errors(c, err)
}
func (c *client) Errorf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Errorf(format, v...)
}
func (c *client) Debugf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Debugf(format, v...)
}
func (c *client) Noticef(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Noticef(format, v...)
}
func (c *client) Tracef(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Tracef(format, v...)
}
func (c *client) Warnf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Warnf(format, v...)
}
| 1 | 10,246 | I think if you use %s then you do not need reason.String() and can just do reason. | nats-io-nats-server | go |
@@ -156,9 +156,9 @@ func WorkflowEndingRunID(endingRunID string) Tag {
return newStringTag("wf-ending-run-id", endingRunID)
}
-// WorkflowDecisionTimeoutSeconds returns tag for WorkflowDecisionTimeoutSeconds
-func WorkflowDecisionTimeoutSeconds(s int32) Tag {
- return newInt32("wf-decision-timeout", s)
+// WorkflowTaskTimeoutSeconds returns tag for WorkflowTaskTimeoutSeconds
+func WorkflowTaskTimeoutSeconds(s int32) Tag {
+ return newInt32("wf-workflow-task-timeout", s)
}
// QueryID returns tag for QueryID | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package tag
import (
"fmt"
"time"
"github.com/gogo/protobuf/types"
enumspb "go.temporal.io/api/enums/v1"
enumsspb "go.temporal.io/server/api/enums/v1"
)
// All logging tags are defined in this file.
// To make the available tags easy to find, we recommend that all tags be categorized and placed in the corresponding section.
// We currently have these categories:
// 0. Common tags that can't be categorized (or belong to more than one)
// 1. Workflow: information that is useful to our customers, like workflow-id/run-id/task-queue/...
// 2. System: internal information which usually cannot be understood by our customers.
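// Illustrative usage (editor's sketch, assuming a logger whose methods take
// variadic Tags):
//
//	logger.Info("workflow task timed out",
//		tag.WorkflowNamespace(namespace),
//		tag.WorkflowID(workflowID),
//		tag.WorkflowRunID(runID))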
/////////////////// Common tags defined here ///////////////////
// Error returns tag for Error
func Error(err error) Tag {
return newErrorTag("error", err)
}
// ClusterName returns tag for ClusterName
func ClusterName(clusterName string) Tag {
return newStringTag("cluster-name", clusterName)
}
// Timestamp returns tag for Timestamp
func Timestamp(timestamp time.Time) Tag {
return newTimeTag("timestamp", timestamp)
}
// TimestampProto returns tag for TimestampProto
func TimestampProto(timestamp *types.Timestamp) Tag {
t, _ := types.TimestampFromProto(timestamp)
return newTimeTag("timestamp", t)
}
// TimestampInt returns tag for Timestamp
func TimestampInt(timestamp int64) Tag {
return newInt64("timestamp", timestamp)
}
/////////////////// Workflow tags defined here: ( wf is short for workflow) ///////////////////
// workflowAction returns tag for WorkflowAction
func workflowAction(action string) Tag {
return newPredefinedStringTag("wf-action", action)
}
// workflowListFilterType returns tag for WorkflowListFilterType
func workflowListFilterType(listFilterType string) Tag {
return newPredefinedStringTag("wf-list-filter-type", listFilterType)
}
// general
// WorkflowError returns tag for WorkflowError
func WorkflowError(error error) Tag { return newErrorTag("wf-error", error) }
// WorkflowTimeoutType returns tag for WorkflowTimeoutType
func WorkflowTimeoutType(timeoutType enumspb.TimeoutType) Tag {
return newStringTag("wf-timeout-type", timeoutType.String())
}
// WorkflowPollContextTimeout returns tag for WorkflowPollContextTimeout
func WorkflowPollContextTimeout(pollContextTimeout time.Duration) Tag {
return newDurationTag("wf-poll-context-timeout", pollContextTimeout)
}
// WorkflowHandlerName returns tag for WorkflowHandlerName
func WorkflowHandlerName(handlerName string) Tag {
return newStringTag("wf-handler-name", handlerName)
}
// WorkflowID returns tag for WorkflowID
func WorkflowID(workflowID string) Tag {
return newStringTag("wf-id", workflowID)
}
// WorkflowType returns tag for WorkflowType
func WorkflowType(wfType string) Tag {
return newStringTag("wf-type", wfType)
}
// WorkflowState returns tag for WorkflowState
func WorkflowState(s enumsspb.WorkflowExecutionState) Tag {
return newStringTag("wf-state", s.String())
}
// WorkflowRunID returns tag for WorkflowRunID
func WorkflowRunID(runID string) Tag {
return newStringTag("wf-run-id", runID)
}
// WorkflowResetBaseRunID returns tag for WorkflowResetBaseRunID
func WorkflowResetBaseRunID(runID string) Tag {
return newStringTag("wf-reset-base-run-id", runID)
}
// WorkflowResetNewRunID returns tag for WorkflowResetNewRunID
func WorkflowResetNewRunID(runID string) Tag {
return newStringTag("wf-reset-new-run-id", runID)
}
// WorkflowBinaryChecksum returns tag for WorkflowBinaryChecksum
func WorkflowBinaryChecksum(cs string) Tag {
return newStringTag("wf-binary-checksum", cs)
}
// WorkflowActivityID returns tag for WorkflowActivityID
func WorkflowActivityID(id string) Tag {
return newStringTag("wf-activity-id", id)
}
// WorkflowTimerID returns tag for WorkflowTimerID
func WorkflowTimerID(id string) Tag {
return newStringTag("wf-timer-id", id)
}
// WorkflowBeginningRunID returns tag for WorkflowBeginningRunID
func WorkflowBeginningRunID(beginningRunID string) Tag {
return newStringTag("wf-beginning-run-id", beginningRunID)
}
// WorkflowEndingRunID returns tag for WorkflowEndingRunID
func WorkflowEndingRunID(endingRunID string) Tag {
return newStringTag("wf-ending-run-id", endingRunID)
}
// WorkflowDecisionTimeoutSeconds returns tag for WorkflowDecisionTimeoutSeconds
func WorkflowDecisionTimeoutSeconds(s int32) Tag {
return newInt32("wf-decision-timeout", s)
}
// QueryID returns tag for QueryID
func QueryID(queryID string) Tag {
return newStringTag("query-id", queryID)
}
// BlobSizeViolationOperation returns tag for BlobSizeViolationOperation
func BlobSizeViolationOperation(operation string) Tag {
return newStringTag("blob-size-violation-operation", operation)
}
// namespace related
// WorkflowNamespaceID returns tag for WorkflowNamespaceID
func WorkflowNamespaceID(namespaceID string) Tag {
return newStringTag("wf-namespace-id", namespaceID)
}
// WorkflowNamespace returns tag for WorkflowNamespace
func WorkflowNamespace(namespace string) Tag {
return newStringTag("wf-namespace", namespace)
}
// WorkflowNamespaceIDs returns tag for WorkflowNamespaceIDs
func WorkflowNamespaceIDs(namespaceIDs interface{}) Tag {
return newObjectTag("wf-namespace-ids", namespaceIDs)
}
// history event ID related
// WorkflowEventID returns tag for WorkflowEventID
func WorkflowEventID(eventID int64) Tag {
return newInt64("wf-history-event-id", eventID)
}
// WorkflowScheduleID returns tag for WorkflowScheduleID
func WorkflowScheduleID(scheduleID int64) Tag {
return newInt64("wf-schedule-id", scheduleID)
}
// WorkflowStartedID returns tag for WorkflowStartedID
func WorkflowStartedID(id int64) Tag {
return newInt64("wf-started-id", id)
}
// WorkflowInitiatedID returns tag for WorkflowInitiatedID
func WorkflowInitiatedID(id int64) Tag {
return newInt64("wf-initiated-id", id)
}
// WorkflowFirstEventID returns tag for WorkflowFirstEventID
func WorkflowFirstEventID(firstEventID int64) Tag {
return newInt64("wf-first-event-id", firstEventID)
}
// WorkflowNextEventID returns tag for WorkflowNextEventID
func WorkflowNextEventID(nextEventID int64) Tag {
return newInt64("wf-next-event-id", nextEventID)
}
// WorkflowBeginningFirstEventID returns tag for WorkflowBeginningFirstEventID
func WorkflowBeginningFirstEventID(beginningFirstEventID int64) Tag {
return newInt64("wf-begining-first-event-id", beginningFirstEventID)
}
// WorkflowEndingNextEventID returns tag for WorkflowEndingNextEventID
func WorkflowEndingNextEventID(endingNextEventID int64) Tag {
return newInt64("wf-ending-next-event-id", endingNextEventID)
}
// WorkflowResetNextEventID returns tag for WorkflowResetNextEventID
func WorkflowResetNextEventID(resetNextEventID int64) Tag {
return newInt64("wf-reset-next-event-id", resetNextEventID)
}
// history tree
// WorkflowTreeID returns tag for WorkflowTreeID
func WorkflowTreeID(treeID string) Tag {
return newStringTag("wf-tree-id", treeID)
}
// WorkflowBranchID returns tag for WorkflowBranchID
func WorkflowBranchID(branchID string) Tag {
return newStringTag("wf-branch-id", branchID)
}
// workflow task
// WorkflowDecisionType returns tag for WorkflowDecisionType
func WorkflowDecisionType(decisionType enumspb.DecisionType) Tag {
return newStringTag("wf-decision-type", decisionType.String())
}
// WorkflowQueryType returns tag for WorkflowQueryType
func WorkflowQueryType(qt string) Tag {
return newStringTag("wf-query-type", qt)
}
// WorkflowDecisionFailCause returns tag for WorkflowDecisionFailCause
func WorkflowDecisionFailCause(decisionFailCause enumspb.WorkflowTaskFailedCause) Tag {
return newStringTag("wf-decision-fail-cause", decisionFailCause.String())
}
// WorkflowTaskQueueType returns tag for WorkflowTaskQueueType
func WorkflowTaskQueueType(taskQueueType enumspb.TaskQueueType) Tag {
return newStringTag("wf-task-queue-type", taskQueueType.String())
}
// WorkflowTaskQueueName returns tag for WorkflowTaskQueueName
func WorkflowTaskQueueName(taskQueueName string) Tag {
return newStringTag("wf-task-queue-name", taskQueueName)
}
// size limit
// WorkflowSize returns tag for WorkflowSize
func WorkflowSize(workflowSize int64) Tag {
return newInt64("wf-size", workflowSize)
}
// WorkflowSignalCount returns tag for SignalCount
func WorkflowSignalCount(signalCount int32) Tag {
return newInt32("wf-signal-count", signalCount)
}
// WorkflowHistorySize returns tag for HistorySize
func WorkflowHistorySize(historySize int) Tag {
return newInt("wf-history-size", historySize)
}
// WorkflowHistorySizeBytes returns tag for HistorySizeBytes
func WorkflowHistorySizeBytes(historySizeBytes int) Tag {
return newInt("wf-history-size-bytes", historySizeBytes)
}
// WorkflowEventCount returns tag for EventCount
func WorkflowEventCount(eventCount int) Tag {
return newInt("wf-event-count", eventCount)
}
/////////////////// System tags defined here: ///////////////////
// Tags with pre-defined values
// component returns tag for Component
func component(component string) Tag {
return newPredefinedStringTag("component", component)
}
// lifecycle returns tag for Lifecycle
func lifecycle(lifecycle string) Tag {
return newPredefinedStringTag("lifecycle", lifecycle)
}
// storeOperation returns tag for StoreOperation
func storeOperation(storeOperation string) Tag {
return newPredefinedStringTag("store-operation", storeOperation)
}
// operationResult returns tag for OperationResult
func operationResult(operationResult string) Tag {
return newPredefinedStringTag("operation-result", operationResult)
}
// errorType returns tag for ErrorType
func errorType(errorType string) Tag {
return newPredefinedStringTag("error", errorType)
}
// shardupdate returns tag for Shardupdate
func shardupdate(shardupdate string) Tag {
return newPredefinedStringTag("shard-update", shardupdate)
}
// general
// Service returns tag for Service
func Service(sv string) Tag {
return newStringTag("service", sv)
}
// Addresses returns tag for Addresses
func Addresses(ads []string) Tag {
return newObjectTag("addresses", ads)
}
// ListenerName returns tag for ListenerName
func ListenerName(name string) Tag {
return newStringTag("listener-name", name)
}
// Address returns tag for Address
func Address(ad string) Tag {
return newStringTag("address", ad)
}
// HostID returns tag for HostID
func HostID(hid string) Tag {
return newStringTag("hostId", hid)
}
// Env returns tag for runtime environment
func Env(env string) Tag {
return newStringTag("env", env)
}
// Key returns tag for Key
func Key(k string) Tag {
return newStringTag("key", k)
}
// Name returns tag for Name
func Name(k string) Tag {
return newStringTag("name", k)
}
// Value returns tag for Value
func Value(v interface{}) Tag {
return newObjectTag("value", v)
}
// ValueType returns tag for ValueType
func ValueType(v interface{}) Tag {
return newStringTag("value-type", fmt.Sprintf("%T", v))
}
// DefaultValue returns tag for DefaultValue
func DefaultValue(v interface{}) Tag {
return newObjectTag("default-value", v)
}
// IgnoredValue returns tag for IgnoredValue
func IgnoredValue(v interface{}) Tag {
return newObjectTag("ignored-value", v)
}
// Port returns tag for Port
func Port(p int) Tag {
return newInt("port", p)
}
// CursorTimestamp returns tag for CursorTimestamp
func CursorTimestamp(timestamp time.Time) Tag {
return newTimeTag("cursor-timestamp", timestamp)
}
// MetricScope returns tag for MetricScope
func MetricScope(metricScope int) Tag {
return newInt("metric-scope", metricScope)
}
// StoreType returns tag for StoreType
func StoreType(storeType string) Tag {
return newPredefinedStringTag("store-type", storeType)
}
// DetailInfo returns tag for DetailInfo
func DetailInfo(i string) Tag {
return newStringTag("detail-info", i)
}
// Counter returns tag for Counter
func Counter(c int) Tag {
return newInt("counter", c)
}
// Number returns tag for Number
func Number(n int64) Tag {
return newInt64("number", n)
}
// NextNumber returns tag for NextNumber
func NextNumber(n int64) Tag {
return newInt64("next-number", n)
}
// Bool returns tag for Bool
func Bool(b bool) Tag {
return newBoolTag("bool", b)
}
// history engine shard
// ShardID returns tag for ShardID
func ShardID(shardID int) Tag {
return newInt("shard-id", shardID)
}
// ShardItem returns tag for ShardItem
func ShardItem(shardItem interface{}) Tag {
return newObjectTag("shard-item", shardItem)
}
// ShardTime returns tag for ShardTime
func ShardTime(shardTime interface{}) Tag {
return newObjectTag("shard-time", shardTime)
}
// ShardReplicationAck returns tag for ShardReplicationAck
func ShardReplicationAck(shardReplicationAck int64) Tag {
return newInt64("shard-replication-ack", shardReplicationAck)
}
// PreviousShardRangeID returns tag for PreviousShardRangeID
func PreviousShardRangeID(id int64) Tag {
return newInt64("previous-shard-range-id", id)
}
// ShardRangeID returns tag for ShardRangeID
func ShardRangeID(id int64) Tag {
return newInt64("shard-range-id", id)
}
// ReadLevel returns tag for ReadLevel
func ReadLevel(lv int64) Tag {
return newInt64("read-level", lv)
}
// MinLevel returns tag for MinLevel
func MinLevel(lv int64) Tag {
return newInt64("min-level", lv)
}
// MaxLevel returns tag for MaxLevel
func MaxLevel(lv int64) Tag {
return newInt64("max-level", lv)
}
// ShardTransferAcks returns tag for ShardTransferAcks
func ShardTransferAcks(shardTransferAcks interface{}) Tag {
return newObjectTag("shard-transfer-acks", shardTransferAcks)
}
// ShardTimerAcks returns tag for ShardTimerAcks
func ShardTimerAcks(shardTimerAcks interface{}) Tag {
return newObjectTag("shard-timer-acks", shardTimerAcks)
}
// task queue processor
// Task returns tag for Task
func Task(task interface{}) Tag {
return newObjectTag("queue-task", task)
}
// Tasks returns tag for Tasks
func Tasks(s interface{}) Tag {
return newObjectTag("tasks", s)
}
// TaskID returns tag for TaskID
func TaskID(taskID int64) Tag {
return newInt64("queue-task-id", taskID)
}
// TaskType returns tag for the queue processor TaskType
func TaskType(taskType enumsspb.TaskType) Tag {
return newStringTag("queue-task-type", taskType.String())
}
// TaskVersion returns tag for TaskVersion
func TaskVersion(taskVersion int64) Tag {
return newInt64("queue-task-version", taskVersion)
}
// TaskVisibilityTimestamp returns tag for task visibilityTimestamp
func TaskVisibilityTimestamp(timestamp int64) Tag {
return newInt64("queue-task-visibility-timestamp", timestamp)
}
// NumberProcessed returns tag for NumberProcessed
func NumberProcessed(n int) Tag {
return newInt("number-processed", n)
}
// NumberDeleted returns tag for NumberDeleted
func NumberDeleted(n int) Tag {
return newInt("number-deleted", n)
}
// TimerTaskStatus returns tag for TimerTaskStatus
func TimerTaskStatus(timerTaskStatus int32) Tag {
return newInt32("timer-task-status", timerTaskStatus)
}
// retry
// Attempt returns tag for Attempt
func Attempt(attempt int32) Tag {
return newInt32("attempt", attempt)
}
// AttemptCount returns tag for AttemptCount
func AttemptCount(attemptCount int64) Tag {
return newInt64("attempt-count", attemptCount)
}
// AttemptStart returns tag for AttemptStart
func AttemptStart(attemptStart time.Time) Tag {
return newTimeTag("attempt-start", attemptStart)
}
// AttemptEnd returns tag for AttemptEnd
func AttemptEnd(attemptEnd time.Time) Tag {
return newTimeTag("attempt-end", attemptEnd)
}
// ScheduleAttempt returns tag for ScheduleAttempt
func ScheduleAttempt(scheduleAttempt int64) Tag {
return newInt64("schedule-attempt", scheduleAttempt)
}
// Elasticsearch
// ESRequest returns tag for ESRequest
func ESRequest(ESRequest string) Tag {
return newStringTag("es-request", ESRequest)
}
// ESResponseStatus returns tag for ESResponse status
func ESResponseStatus(status int) Tag {
return newInt("es-response-status", status)
}
// ESResponseError returns tag for ESResponse error
func ESResponseError(msg string) Tag {
return newStringTag("es-response-error", msg)
}
// ESKey returns tag for ESKey
func ESKey(ESKey string) Tag {
return newStringTag("es-mapping-key", ESKey)
}
// ESValue returns tag for ESValue
func ESValue(ESValue []byte) Tag {
// convert value to string type so that the value logged is human readable
return newStringTag("es-mapping-value", string(ESValue))
}
// ESConfig returns tag for ESConfig
func ESConfig(c interface{}) Tag {
return newObjectTag("es-config", c)
}
// ESField returns tag for ESField
func ESField(ESField string) Tag {
return newStringTag("es-field", ESField)
}
// ESDocID returns tag for ESDocID
func ESDocID(id string) Tag {
return newStringTag("es-doc-id", id)
}
// LoggingCallAtKey is a reserved tag key
const LoggingCallAtKey = "logging-call-at"
// SysStackTrace returns tag for SysStackTrace
func SysStackTrace(stackTrace string) Tag {
return newStringTag("sys-stack-trace", stackTrace)
}
// Kafka related
// KafkaTopicName returns tag for TopicName
func KafkaTopicName(topicName string) Tag {
return newStringTag("kafka-topic-name", topicName)
}
// KafkaConsumerName returns tag for ConsumerName
func KafkaConsumerName(consumerName string) Tag {
return newStringTag("kafka-consumer-name", consumerName)
}
// KafkaPartition returns tag for Partition
func KafkaPartition(partition int32) Tag {
return newInt32("kafka-partition", partition)
}
// KafkaPartitionKey returns tag for PartitionKey
func KafkaPartitionKey(partitionKey interface{}) Tag {
return newObjectTag("kafka-partition-key", partitionKey)
}
// KafkaOffset returns tag for Offset
func KafkaOffset(offset int64) Tag {
return newInt64("kafka-offset", offset)
}
// TokenLastEventID returns tag for TokenLastEventID
func TokenLastEventID(id int64) Tag {
return newInt64("token-last-event-id", id)
}
/////////////////// XDC tags defined here: xdc- ///////////////////
// SourceCluster returns tag for SourceCluster
func SourceCluster(sourceCluster string) Tag {
return newStringTag("xdc-source-cluster", sourceCluster)
}
// PrevActiveCluster returns tag for PrevActiveCluster
func PrevActiveCluster(prevActiveCluster string) Tag {
return newStringTag("xdc-prev-active-cluster", prevActiveCluster)
}
// FailoverMsg returns tag for FailoverMsg
func FailoverMsg(failoverMsg string) Tag {
return newStringTag("xdc-failover-msg", failoverMsg)
}
// FailoverVersion returns tag for FailoverVersion
func FailoverVersion(version int64) Tag {
return newInt64("xdc-failover-version", version)
}
// CurrentVersion returns tag for CurrentVersion
func CurrentVersion(currentVersion int64) Tag {
return newInt64("xdc-current-version", currentVersion)
}
// IncomingVersion returns tag for IncomingVersion
func IncomingVersion(incomingVersion int64) Tag {
return newInt64("xdc-incoming-version", incomingVersion)
}
// ReplicationInfo returns tag for ReplicationInfo
func ReplicationInfo(replicationInfo interface{}) Tag {
return newObjectTag("xdc-replication-info", replicationInfo)
}
// ReplicationState returns tag for ReplicationState
func ReplicationState(replicationState interface{}) Tag {
return newObjectTag("xdc-replication-state", replicationState)
}
// FirstEventVersion returns tag for FirstEventVersion
func FirstEventVersion(version int64) Tag {
return newInt64("xdc-first-event-version", version)
}
// LastEventVersion returns tag for LastEventVersion
func LastEventVersion(version int64) Tag {
return newInt64("xdc-last-event-version", version)
}
// TokenLastEventVersion returns tag for TokenLastEventVersion
func TokenLastEventVersion(version int64) Tag {
return newInt64("xdc-token-last-event-version", version)
}
/////////////////// Archival tags defined here: archival- ///////////////////
// archival request tags
// ArchivalCallerServiceName returns tag for the service name calling archival client
func ArchivalCallerServiceName(callerServiceName string) Tag {
return newStringTag("archival-caller-service-name", callerServiceName)
}
// ArchivalArchiveAttemptedInline returns tag for whether archival is attempted inline before signal is sent.
func ArchivalArchiveAttemptedInline(archiveInline bool) Tag {
return newBoolTag("archival-archive-attempted-inline", archiveInline)
}
// ArchivalRequestNamespaceID returns tag for RequestNamespaceID
func ArchivalRequestNamespaceID(requestNamespaceID string) Tag {
return newStringTag("archival-request-namespace-id", requestNamespaceID)
}
// ArchivalRequestNamespace returns tag for RequestNamespace
func ArchivalRequestNamespace(requestNamespace string) Tag {
return newStringTag("archival-request-namespace", requestNamespace)
}
// ArchivalRequestWorkflowID returns tag for RequestWorkflowID
func ArchivalRequestWorkflowID(requestWorkflowID string) Tag {
return newStringTag("archival-request-workflow-id", requestWorkflowID)
}
// ArchivalRequestWorkflowType returns tag for RequestWorkflowType
func ArchivalRequestWorkflowType(requestWorkflowType string) Tag {
return newStringTag("archival-request-workflow-type", requestWorkflowType)
}
// ArchivalRequestRunID returns tag for RequestRunID
func ArchivalRequestRunID(requestRunID string) Tag {
return newStringTag("archival-request-run-id", requestRunID)
}
// ArchivalRequestBranchToken returns tag for RequestBranchToken
func ArchivalRequestBranchToken(requestBranchToken []byte) Tag {
return newObjectTag("archival-request-branch-token", requestBranchToken)
}
// ArchivalRequestNextEventID returns tag for RequestNextEventID
func ArchivalRequestNextEventID(requestNextEventID int64) Tag {
return newInt64("archival-request-next-event-id", requestNextEventID)
}
// ArchivalRequestCloseFailoverVersion returns tag for RequestCloseFailoverVersion
func ArchivalRequestCloseFailoverVersion(requestCloseFailoverVersion int64) Tag {
return newInt64("archival-request-close-failover-version", requestCloseFailoverVersion)
}
// ArchivalRequestCloseTimestamp returns tag for RequestCloseTimestamp
func ArchivalRequestCloseTimestamp(requestCloseTimeStamp int64) Tag {
return newInt64("archival-request-close-timestamp", requestCloseTimeStamp)
}
// ArchivalRequestStatus returns tag for RequestStatus
func ArchivalRequestStatus(requestStatus string) Tag {
return newStringTag("archival-request-status", requestStatus)
}
// ArchivalURI returns tag for Archival URI
func ArchivalURI(URI string) Tag {
return newStringTag("archival-URI", URI)
}
// ArchivalArchiveFailReason returns tag for ArchivalArchiveFailReason
func ArchivalArchiveFailReason(archiveFailReason string) Tag {
return newStringTag("archival-archive-fail-reason", archiveFailReason)
}
// ArchivalDeleteHistoryFailReason returns tag for ArchivalDeleteHistoryFailReason
func ArchivalDeleteHistoryFailReason(deleteHistoryFailReason string) Tag {
return newStringTag("archival-delete-history-fail-reason", deleteHistoryFailReason)
}
// ArchivalVisibilityQuery returns tag for the query for getting archived visibility record
func ArchivalVisibilityQuery(query string) Tag {
return newStringTag("archival-visibility-query", query)
}
// The following logger tags are only used by the internal archiver implementation.
// TODO: move them to the internal repo once the Temporal plugin model is in place.
// ArchivalBlobKey returns tag for BlobKey
func ArchivalBlobKey(blobKey string) Tag {
return newStringTag("archival-blob-key", blobKey)
}
// ArchivalDeterministicConstructionCheckFailReason returns tag for ArchivalDeterministicConstructionCheckFailReason
func ArchivalDeterministicConstructionCheckFailReason(deterministicConstructionCheckFailReason string) Tag {
return newStringTag("archival-deterministic-construction-check-fail-reason", deterministicConstructionCheckFailReason)
}
// ArchivalNonDeterministicBlobKey returns tag for randomly generated NonDeterministicBlobKey
func ArchivalNonDeterministicBlobKey(nondeterministicBlobKey string) Tag {
return newStringTag("archival-non-deterministic-blob-key", nondeterministicBlobKey)
}
// ArchivalBlobIntegrityCheckFailReason returns tag for ArchivalBlobIntegrityCheckFailReason
func ArchivalBlobIntegrityCheckFailReason(blobIntegrityCheckFailReason string) Tag {
return newStringTag("archival-blob-integrity-check-fail-reason", blobIntegrityCheckFailReason)
}
// ArchivalBlobstoreContextTimeout returns tag for ArchivalBlobstoreContextTimeout
func ArchivalBlobstoreContextTimeout(blobstoreContextTimeout time.Duration) Tag {
return newDurationTag("archival-blobstore-context-timeout", blobstoreContextTimeout)
}
// TransportType returns tag for transportType
func TransportType(transportType string) Tag {
return newStringTag("transport-type", transportType)
}
// ActivityInfo returns tag for activity info
func ActivityInfo(activityInfo interface{}) Tag {
return newObjectTag("activity-info", activityInfo)
}
// DecisionRequestId returns tag for decision RequestId
func DecisionRequestId(s string) Tag {
return newStringTag("decision-request-id", s)
}
// AckLevel returns tag for ack level
func AckLevel(s interface{}) Tag {
return newObjectTag("ack-level", s)
}
// QueryLevel returns tag for query level
func QueryLevel(s time.Time) Tag {
return newTimeTag("query-level", s)
}
// TaskQueueInfo returns tag for task queue info
func TaskQueueInfo(s interface{}) Tag {
return newObjectTag("task-queue-info", s)
}
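// Example usage (illustrative sketch, not part of the original file). These
// constructors build structured log tags; the Logger interface below is an
// assumption made for the sketch, not necessarily the real logging API.
//
//	type Logger interface {
//		Info(msg string, tags ...Tag)
//	}
//
//	// logShardAcquired emits one structured log entry carrying typed tags.
//	func logShardAcquired(logger Logger, shardID int, rangeID int64) {
//		logger.Info("shard range updated",
//			ShardID(shardID),
//			ShardRangeID(rangeID),
//		)
//	}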
Review comment (temporalio/temporal, Go): Having "wf" prefix doesn't make sense anymore. Please remove.
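// Hypothetical sketch of the rename the review comment asks for (the actual
// change may differ): the same constructors with the "wf-" prefix dropped
// from the tag keys.
//
//	func WorkflowTaskQueueType(taskQueueType enumspb.TaskQueueType) Tag {
//		return newStringTag("task-queue-type", taskQueueType.String())
//	}
//
//	func WorkflowTaskQueueName(taskQueueName string) Tag {
//		return newStringTag("task-queue-name", taskQueueName)
//	}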
@@ -5,15 +5,15 @@ import {
teardown,
getMixedArray,
mixedArrayHTML,
- serializeHtml
+ serializeHtml,
+ sortAttributes,
+ spyAll
} from '../_util/helpers';
import { div, span, p } from '../_util/dom';
/** @jsx createElement */
const h = createElement;
-let spyAll = obj => Object.keys(obj).forEach(key => sinon.spy(obj, key));
-
function getAttributes(node) {
let attrs = {};
if (node.attributes) {

import { createElement, render, Component, Fragment } from 'preact';
import { setupRerender } from 'preact/test-utils';
import {
setupScratch,
teardown,
getMixedArray,
mixedArrayHTML,
serializeHtml
} from '../_util/helpers';
import { div, span, p } from '../_util/dom';
/** @jsx createElement */
const h = createElement;
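// spyAll replaces each own enumerable method of `obj` with a sinon spy, in place.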
let spyAll = obj => Object.keys(obj).forEach(key => sinon.spy(obj, key));
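// getAttributes collects a DOM node's attributes into a plain { name: value }
// object so tests can assert on them without depending on attribute order.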
function getAttributes(node) {
let attrs = {};
if (node.attributes) {
for (let i = node.attributes.length; i--; ) {
attrs[node.attributes[i].name] = node.attributes[i].value;
}
}
return attrs;
}
// hacky normalization of attribute order across browsers.
function sortAttributes(html) {
return html.replace(
/<([a-z0-9-]+)((?:\s[a-z0-9:_.-]+=".*?")+)((?:\s*\/)?>)/gi,
(s, pre, attrs, after) => {
let list = attrs
.match(/\s[a-z0-9:_.-]+=".*?"/gi)
.sort((a, b) => (a > b ? 1 : -1));
if (~after.indexOf('/')) after = '></' + pre + '>';
return '<' + pre + list.join('') + after;
}
);
}
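// Illustrative example: sortAttributes('<div b="2" a="1">x</div>') returns
// '<div a="1" b="2">x</div>', giving assertions a stable attribute order
// regardless of how a given browser serializes markup.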
describe('Components', () => {
/** @type {HTMLDivElement} */
let scratch;
/** @type {() => void} */
let rerender;
beforeEach(() => {
scratch = setupScratch();
rerender = setupRerender();
});
afterEach(() => {
teardown(scratch);
});
describe('Component construction', () => {
/** @type {object} */
let instance;
let PROPS;
let STATE;
beforeEach(() => {
instance = null;
PROPS = { foo: 'bar', onBaz: () => {} };
STATE = { text: 'Hello' };
});
it('should render components', () => {
class C1 extends Component {
render() {
return <div>C1</div>;
}
}
sinon.spy(C1.prototype, 'render');
render(<C1 />, scratch);
expect(C1.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch({}, {})
.and.to.have.returned(sinon.match({ type: 'div' }));
expect(scratch.innerHTML).to.equal('<div>C1</div>');
});
it('should render functional components', () => {
const C3 = sinon.spy(props => <div {...props} />);
render(<C3 {...PROPS} />, scratch);
expect(C3)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(PROPS)
.and.to.have.returned(
sinon.match({
type: 'div',
props: PROPS
})
);
expect(scratch.innerHTML).to.equal('<div foo="bar"></div>');
});
it('should render components with props', () => {
let constructorProps;
class C2 extends Component {
constructor(props) {
super(props);
constructorProps = props;
}
render(props) {
return <div {...props} />;
}
}
sinon.spy(C2.prototype, 'render');
render(<C2 {...PROPS} />, scratch);
expect(constructorProps).to.deep.equal(PROPS);
expect(C2.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(PROPS, {})
.and.to.have.returned(
sinon.match({
type: 'div',
props: PROPS
})
);
expect(scratch.innerHTML).to.equal('<div foo="bar"></div>');
});
it('should not crash when setting state with cb in constructor', () => {
let spy = sinon.spy();
class Foo extends Component {
constructor(props) {
super(props);
this.setState({ preact: 'awesome' }, spy);
}
}
expect(() => render(<Foo foo="bar" />, scratch)).not.to.throw();
rerender();
expect(spy).to.not.be.called;
});
it('should not crash when calling forceUpdate with cb in constructor', () => {
let spy = sinon.spy();
class Foo extends Component {
constructor(props) {
super(props);
this.forceUpdate(spy);
}
}
expect(() => render(<Foo foo="bar" />, scratch)).not.to.throw();
rerender();
expect(spy).to.not.be.called;
});
it('should accurately call nested setState callbacks', () => {
let states = [];
let finalState;
class Foo extends Component {
constructor(props) {
super(props);
this.state = { a: 'b' };
}
componentDidMount() {
states.push(this.state);
expect(scratch.innerHTML).to.equal('<p>b</p>');
// eslint-disable-next-line
this.setState({ a: 'a' }, () => {
states.push(this.state);
expect(scratch.innerHTML).to.equal('<p>a</p>');
this.setState({ a: 'c' }, () => {
expect(scratch.innerHTML).to.equal('<p>c</p>');
states.push(this.state);
});
});
}
render() {
finalState = this.state;
return <p>{this.state.a}</p>;
}
}
render(<Foo />, scratch);
rerender(); // First setState
rerender(); // Second setState
let [firstState, secondState, thirdState] = states;
expect(finalState).to.deep.equal({ a: 'c' });
expect(firstState).to.deep.equal({ a: 'b' });
expect(secondState).to.deep.equal({ a: 'a' });
expect(thirdState).to.deep.equal({ a: 'c' });
});
it('should initialize props & context but not state in Component constructor', () => {
// Not initializing state matches React behavior: https://codesandbox.io/s/rml19v8o2q
class Foo extends Component {
constructor(props, context) {
super(props, context);
expect(this.props).to.equal(props);
expect(this.state).to.deep.equal(undefined);
expect(this.context).to.equal(context);
instance = this;
}
render(props) {
return <div {...props}>Hello</div>;
}
}
sinon.spy(Foo.prototype, 'render');
render(<Foo {...PROPS} />, scratch);
expect(Foo.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(PROPS, {}, {})
.and.to.have.returned(sinon.match({ type: 'div', props: PROPS }));
expect(instance.props).to.deep.equal(PROPS);
expect(instance.state).to.deep.equal({});
expect(instance.context).to.deep.equal({});
expect(scratch.innerHTML).to.equal('<div foo="bar">Hello</div>');
});
it("should render Component classes that don't pass args into the Component constructor", () => {
function Foo() {
Component.call(this);
instance = this;
this.state = STATE;
}
Foo.prototype.render = sinon.spy((props, state) => (
<div {...props}>{state.text}</div>
));
render(<Foo {...PROPS} />, scratch);
expect(Foo.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(
PROPS,
STATE,
{}
)
.and.to.have.returned(sinon.match({ type: 'div', props: PROPS }));
expect(instance.props).to.deep.equal(PROPS);
expect(instance.state).to.deep.equal(STATE);
expect(instance.context).to.deep.equal({});
expect(scratch.innerHTML).to.equal('<div foo="bar">Hello</div>');
});
it('should also update the current dom', () => {
let trigger;
class A extends Component {
constructor(props) {
super(props);
this.state = { show: false };
trigger = this.set = this.set.bind(this);
}
set() {
this.setState({ show: true });
}
render() {
return this.state.show ? <div>A</div> : null;
}
}
const B = () => <p>B</p>;
render(
<div>
<A />
<B />
</div>,
scratch
);
expect(scratch.innerHTML).to.equal('<div><p>B</p></div>');
trigger();
rerender();
expect(scratch.innerHTML).to.equal('<div><div>A</div><p>B</p></div>');
});
it('should not orphan children', () => {
let triggerC, triggerA;
const B = () => <p>B</p>;
// Component with state which swaps its returned element type
class C extends Component {
constructor(props) {
super(props);
this.state = { show: false };
triggerC = this.set = this.set.bind(this);
}
set() {
this.setState({ show: true });
}
render() {
return this.state.show ? <div>data</div> : <p>Loading</p>;
}
}
const WrapC = () => <C />;
class A extends Component {
constructor(props) {
super(props);
this.state = { show: false };
triggerA = this.set = this.set.bind(this);
}
set() {
this.setState({ show: true });
}
render() {
return this.state.show ? <B /> : <WrapC />;
}
}
render(<A />, scratch);
expect(scratch.innerHTML).to.equal('<p>Loading</p>');
triggerC();
rerender();
expect(scratch.innerHTML).to.equal('<div>data</div>');
triggerA();
rerender();
expect(scratch.innerHTML).to.equal('<p>B</p>');
});
it("should render components that don't pass args into the Component constructor (unistore pattern)", () => {
// Pattern unistore uses for connect: https://git.io/fxRqu
function Wrapper() {
instance = this;
this.state = STATE;
this.render = sinon.spy((props, state) => (
<div {...props}>{state.text}</div>
));
}
(Wrapper.prototype = new Component()).constructor = Wrapper;
render(<Wrapper {...PROPS} />, scratch);
expect(instance.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(
PROPS,
STATE,
{}
)
.and.to.have.returned(sinon.match({ type: 'div', props: PROPS }));
expect(instance.props).to.deep.equal(PROPS);
expect(instance.state).to.deep.equal(STATE);
expect(instance.context).to.deep.equal({});
expect(scratch.innerHTML).to.equal('<div foo="bar">Hello</div>');
});
it("should render components that don't call Component constructor", () => {
function Foo() {
instance = this;
this.state = STATE;
}
Foo.prototype = Object.create(Component);
Foo.prototype.render = sinon.spy((props, state) => (
<div {...props}>{state.text}</div>
));
render(<Foo {...PROPS} />, scratch);
expect(Foo.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(
PROPS,
STATE,
{}
)
.and.to.have.returned(sinon.match({ type: 'div', props: PROPS }));
expect(instance.props).to.deep.equal(PROPS);
expect(instance.state).to.deep.equal(STATE);
expect(instance.context).to.deep.equal({});
expect(scratch.innerHTML).to.equal('<div foo="bar">Hello</div>');
});
it("should render components that don't call Component constructor and don't initialize state", () => {
function Foo() {
instance = this;
}
Foo.prototype.render = sinon.spy(props => <div {...props}>Hello</div>);
render(<Foo {...PROPS} />, scratch);
expect(Foo.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(PROPS, {}, {})
.and.to.have.returned(sinon.match({ type: 'div', props: PROPS }));
expect(instance.props).to.deep.equal(PROPS);
expect(instance.state).to.deep.equal({});
expect(instance.context).to.deep.equal({});
expect(scratch.innerHTML).to.equal('<div foo="bar">Hello</div>');
});
it("should render components that don't inherit from Component", () => {
class Foo {
constructor() {
instance = this;
this.state = STATE;
}
render(props, state) {
return <div {...props}>{state.text}</div>;
}
}
sinon.spy(Foo.prototype, 'render');
render(<Foo {...PROPS} />, scratch);
expect(Foo.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(
PROPS,
STATE,
{}
)
.and.to.have.returned(sinon.match({ type: 'div', props: PROPS }));
expect(instance.props).to.deep.equal(PROPS);
expect(instance.state).to.deep.equal(STATE);
expect(instance.context).to.deep.equal({});
expect(scratch.innerHTML).to.equal('<div foo="bar">Hello</div>');
});
it("should render components that don't inherit from Component (unistore pattern)", () => {
// Pattern unistore uses for Provider: https://git.io/fxRqR
function Provider() {
instance = this;
this.state = STATE;
}
Provider.prototype.render = sinon.spy((props, state) => (
<div {...PROPS}>{state.text}</div>
));
render(<Provider {...PROPS} />, scratch);
expect(Provider.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(
PROPS,
STATE,
{}
)
.and.to.have.returned(sinon.match({ type: 'div', props: PROPS }));
expect(instance.props).to.deep.equal(PROPS);
expect(instance.state).to.deep.equal(STATE);
expect(instance.context).to.deep.equal({});
expect(scratch.innerHTML).to.equal('<div foo="bar">Hello</div>');
});
it("should render components that don't inherit from Component and don't initialize state", () => {
class Foo {
constructor() {
instance = this;
}
render(props, state) {
return <div {...props}>Hello</div>;
}
}
sinon.spy(Foo.prototype, 'render');
render(<Foo {...PROPS} />, scratch);
expect(Foo.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(PROPS, {}, {})
.and.to.have.returned(sinon.match({ type: 'div', props: PROPS }));
expect(instance.props).to.deep.equal(PROPS);
expect(instance.state).to.deep.equal({});
expect(instance.context).to.deep.equal({});
expect(scratch.innerHTML).to.equal('<div foo="bar">Hello</div>');
});
it('should render class components that inherit from Component without a render method', () => {
class Foo extends Component {
constructor(props, context) {
super(props, context);
instance = this;
}
}
sinon.spy(Foo.prototype, 'render');
render(<Foo {...PROPS} />, scratch);
expect(Foo.prototype.render)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(PROPS, {}, {})
.and.to.have.returned(undefined);
expect(instance.props).to.deep.equal(PROPS);
expect(instance.state).to.deep.equal({});
expect(instance.context).to.deep.equal({});
expect(scratch.innerHTML).to.equal('');
});
});
it('should render string', () => {
class StringComponent extends Component {
render() {
return 'Hi there';
}
}
render(<StringComponent />, scratch);
expect(scratch.innerHTML).to.equal('Hi there');
});
it('should render number as string', () => {
class NumberComponent extends Component {
render() {
return 42;
}
}
render(<NumberComponent />, scratch);
expect(scratch.innerHTML).to.equal('42');
});
it('should render null as empty string', () => {
class NullComponent extends Component {
render() {
return null;
}
}
render(<NullComponent />, scratch);
expect(scratch.innerHTML).to.equal('');
});
// Test for Issue #73
it('should remove orphaned elements replaced by Components', () => {
class Comp extends Component {
render() {
return <span>span in a component</span>;
}
}
let root;
function test(content) {
root = render(content, scratch, root);
}
test(<Comp />);
test(<div>just a div</div>);
test(<Comp />);
expect(scratch.innerHTML).to.equal('<span>span in a component</span>');
});
// Test for Issue preactjs/preact#176
it('should remove children when root changes to text node', () => {
/** @type {import('preact').Component} */
let comp;
class Comp extends Component {
constructor() {
super();
comp = this;
}
render(_, { alt }) {
return alt ? 'asdf' : <div>test</div>;
}
}
render(<Comp />, scratch);
comp.setState({ alt: true });
comp.forceUpdate();
rerender();
expect(scratch.innerHTML, 'switching to textnode').to.equal('asdf');
comp.setState({ alt: false });
comp.forceUpdate();
rerender();
expect(scratch.innerHTML, 'switching to element').to.equal(
'<div>test</div>'
);
comp.setState({ alt: true });
comp.forceUpdate();
rerender();
expect(scratch.innerHTML, 'switching to textnode 2').to.equal('asdf');
});
// Test for Issue preactjs/preact#1616
it('should maintain order when setting state (that inserts dom-elements)', () => {
let add, addTwice, reset;
const Entry = props => <div>{props.children}</div>;
class App extends Component {
constructor(props) {
super(props);
this.state = { values: ['abc'] };
add = this.add = this.add.bind(this);
addTwice = this.addTwice = this.addTwice.bind(this);
reset = this.reset = this.reset.bind(this);
}
add() {
this.setState({ values: [...this.state.values, 'def'] });
}
addTwice() {
this.setState({ values: [...this.state.values, 'def', 'ghi'] });
}
reset() {
this.setState({ values: ['abc'] });
}
render() {
return (
<div>
{this.state.values.map(v => (
<Entry>{v}</Entry>
))}
<button>First Button</button>
<button>Second Button</button>
<button>Third Button</button>
</div>
);
}
}
render(<App />, scratch);
expect(scratch.firstChild.innerHTML).to.equal(
'<div>abc</div>' +
'<button>First Button</button><button>Second Button</button><button>Third Button</button>'
);
add();
rerender();
expect(scratch.firstChild.innerHTML).to.equal(
'<div>abc</div><div>def' +
'</div><button>First Button</button><button>Second Button</button><button>Third Button</button>'
);
add();
rerender();
expect(scratch.firstChild.innerHTML).to.equal(
'<div>abc</div><div>def</div><div>def' +
'</div><button>First Button</button><button>Second Button</button><button>Third Button</button>'
);
reset();
rerender();
expect(scratch.firstChild.innerHTML).to.equal(
'<div>abc</div>' +
'<button>First Button</button><button>Second Button</button><button>Third Button</button>'
);
addTwice();
rerender();
expect(scratch.firstChild.innerHTML).to.equal(
'<div>abc</div><div>def</div><div>ghi' +
'</div><button>First Button</button><button>Second Button</button><button>Third Button</button>'
);
});
// Test for Issue preactjs/preact#254
it('should not recycle common class children with different keys', () => {
let idx = 0;
let msgs = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'];
let sideEffect = sinon.spy();
class Comp extends Component {
componentWillMount() {
this.innerMsg = msgs[idx++ % 8];
sideEffect();
}
render() {
return <div>{this.innerMsg}</div>;
}
}
sinon.spy(Comp.prototype, 'componentWillMount');
let good, bad;
class GoodContainer extends Component {
constructor(props) {
super(props);
this.state = { alt: false };
good = this;
}
render(_, { alt }) {
return (
<div>
{alt ? null : <Comp key={1} alt={alt} />}
{alt ? null : <Comp key={2} alt={alt} />}
{alt ? <Comp key={3} alt={alt} /> : null}
</div>
);
}
}
class BadContainer extends Component {
constructor(props) {
super(props);
this.state = { alt: false };
bad = this;
}
render(_, { alt }) {
return (
<div>
{alt ? null : <Comp alt={alt} />}
{alt ? null : <Comp alt={alt} />}
{alt ? <Comp alt={alt} /> : null}
</div>
);
}
}
render(<GoodContainer />, scratch);
expect(scratch.textContent, 'new component with key present').to.equal(
'AB'
);
expect(Comp.prototype.componentWillMount).to.have.been.calledTwice;
expect(sideEffect).to.have.been.calledTwice;
sideEffect.resetHistory();
Comp.prototype.componentWillMount.resetHistory();
good.setState({ alt: true });
rerender();
expect(
scratch.textContent,
'new component with key present re-rendered'
).to.equal('C');
// We are recycling the first 2 components already rendered; we just need a new one
expect(Comp.prototype.componentWillMount).to.have.been.calledOnce;
expect(sideEffect).to.have.been.calledOnce;
sideEffect.resetHistory();
Comp.prototype.componentWillMount.resetHistory();
render(<BadContainer />, scratch);
expect(scratch.textContent, 'new component without key').to.equal('DE');
expect(Comp.prototype.componentWillMount).to.have.been.calledTwice;
expect(sideEffect).to.have.been.calledTwice;
sideEffect.resetHistory();
Comp.prototype.componentWillMount.resetHistory();
bad.setState({ alt: true });
rerender();
expect(
scratch.textContent,
'use null placeholders to detect new component is appended'
).to.equal('F');
expect(Comp.prototype.componentWillMount).to.be.calledOnce;
expect(sideEffect).to.be.calledOnce;
});
describe('array children', () => {
it("should render DOM element's array children", () => {
render(<div>{getMixedArray()}</div>, scratch);
expect(scratch.firstChild.innerHTML).to.equal(mixedArrayHTML);
});
it("should render Component's array children", () => {
const Foo = () => getMixedArray();
render(<Foo />, scratch);
expect(scratch.innerHTML).to.equal(mixedArrayHTML);
});
it("should render Fragment's array children", () => {
const Foo = () => <Fragment>{getMixedArray()}</Fragment>;
render(<Foo />, scratch);
expect(scratch.innerHTML).to.equal(mixedArrayHTML);
});
it('should render sibling array children', () => {
const Todo = () => (
<ul>
<li>A header</li>
{['a', 'b'].map(value => (
<li>{value}</li>
))}
<li>A divider</li>
{['c', 'd'].map(value => (
<li>{value}</li>
))}
<li>A footer</li>
</ul>
);
render(<Todo />, scratch);
let ul = scratch.firstChild;
expect(ul.childNodes.length).to.equal(7);
expect(ul.childNodes[0].textContent).to.equal('A header');
expect(ul.childNodes[1].textContent).to.equal('a');
expect(ul.childNodes[2].textContent).to.equal('b');
expect(ul.childNodes[3].textContent).to.equal('A divider');
expect(ul.childNodes[4].textContent).to.equal('c');
expect(ul.childNodes[5].textContent).to.equal('d');
expect(ul.childNodes[6].textContent).to.equal('A footer');
});
});
describe('props.children', () => {
let children;
let Foo = props => {
children = props.children;
return <div>{props.children}</div>;
};
let FunctionFoo = props => {
children = props.children;
return <div>{props.children(2)}</div>;
};
let Bar = () => <span>Bar</span>;
beforeEach(() => {
children = undefined;
});
it('should support passing children as a prop', () => {
const Foo = props => <div {...props} />;
render(
<Foo a="b" children={[<span class="bar">bar</span>, '123', 456]} />,
scratch
);
expect(scratch.innerHTML).to.equal(
'<div a="b"><span class="bar">bar</span>123456</div>'
);
});
it('should be ignored when explicit children exist', () => {
const Foo = props => <div {...props}>a</div>;
render(<Foo children={'b'} />, scratch);
expect(scratch.innerHTML).to.equal('<div>a</div>');
});
it('should be undefined with no child', () => {
render(<Foo />, scratch);
expect(children).to.be.undefined;
expect(scratch.innerHTML).to.equal('<div></div>');
});
it('should be undefined with null as a child', () => {
render(<Foo>{null}</Foo>, scratch);
expect(children).to.be.undefined;
expect(scratch.innerHTML).to.equal('<div></div>');
});
it('should be false with false as a child', () => {
render(<Foo>{false}</Foo>, scratch);
expect(children).to.be.false;
expect(scratch.innerHTML).to.equal('<div></div>');
});
it('should be true with true as a child', () => {
render(<Foo>{true}</Foo>, scratch);
expect(children).to.be.true;
expect(scratch.innerHTML).to.equal('<div></div>');
});
it('should be a string with a text child', () => {
render(<Foo>text</Foo>, scratch);
expect(children).to.be.a('string');
expect(children).to.equal('text');
expect(scratch.innerHTML).to.equal('<div>text</div>');
});
it('should be a string with a number child', () => {
render(<Foo>1</Foo>, scratch);
expect(children).to.be.a('string');
expect(children).to.equal('1');
expect(scratch.innerHTML).to.equal('<div>1</div>');
});
it('should be a VNode with a DOM node child', () => {
render(
<Foo>
<span />
</Foo>,
scratch
);
expect(children).to.be.an('object');
expect(children.type).to.equal('span');
expect(scratch.innerHTML).to.equal('<div><span></span></div>');
});
it('should be a VNode with a Component child', () => {
render(
<Foo>
<Bar />
</Foo>,
scratch
);
expect(children).to.be.an('object');
expect(children.type).to.equal(Bar);
expect(scratch.innerHTML).to.equal('<div><span>Bar</span></div>');
});
it('should be a function with a function child', () => {
const child = num => num.toFixed(2);
render(<FunctionFoo>{child}</FunctionFoo>, scratch);
expect(children).to.be.a('function');
expect(children).to.equal(child);
expect(scratch.innerHTML).to.equal('<div>2.00</div>');
});
it('should be an array with multiple children', () => {
render(
<Foo>
0<span />
<input />
<div />1
</Foo>,
scratch
);
expect(children).to.be.an('array');
expect(children[0]).to.equal('0');
expect(children[1].type).to.equal('span');
expect(children[2].type).to.equal('input');
expect(children[3].type).to.equal('div');
expect(children[4]).to.equal('1');
expect(scratch.innerHTML).to.equal(
`<div>0<span></span><input><div></div>1</div>`
);
});
it('should be an array with an array as children', () => {
const mixedArray = getMixedArray();
render(<Foo>{mixedArray}</Foo>, scratch);
expect(children).to.be.an('array');
expect(children).to.deep.equal(mixedArray);
expect(scratch.innerHTML).to.equal(`<div>${mixedArrayHTML}</div>`);
});
it('should not flatten sibling and nested arrays', () => {
const list1 = [0, 1];
const list2 = [2, 3];
const list3 = [4, 5];
const list4 = [6, 7];
const list5 = [8, 9];
render(
<Foo>
{[list1, list2]}
{[list3, list4]}
{list5}
</Foo>,
scratch
);
expect(children).to.be.an('array');
expect(children).to.deep.equal([[list1, list2], [list3, list4], list5]);
expect(scratch.innerHTML).to.equal('<div>0123456789</div>');
});
});
describe('High-Order Components', () => {
it('should render wrapper HOCs', () => {
const text = "We'll throw some happy little limbs on this tree.";
function withBobRoss(ChildComponent) {
return class BobRossIpsum extends Component {
getChildContext() {
return { text };
}
render(props) {
return <ChildComponent {...props} />;
}
};
}
const PaintSomething = (props, context) => <div>{context.text}</div>;
const Paint = withBobRoss(PaintSomething);
render(<Paint />, scratch);
expect(scratch.innerHTML).to.equal(`<div>${text}</div>`);
});
it('should render HOCs with generic children', () => {
const text =
"Let your imagination just wonder around when you're doing these things.";
class BobRossProvider extends Component {
getChildContext() {
return { text };
}
render(props) {
return props.children;
}
}
function BobRossConsumer(props, context) {
return props.children(context.text);
}
const Say = props => <div>{props.text}</div>;
const Speak = () => (
<BobRossProvider>
<span>A span</span>
<BobRossConsumer>{text => <Say text={text} />}</BobRossConsumer>
<span>A final span</span>
</BobRossProvider>
);
render(<Speak />, scratch);
expect(scratch.innerHTML).to.equal(
`<span>A span</span><div>${text}</div><span>A final span</span>`
);
});
it('should render nested functional components', () => {
const PROPS = { foo: 'bar', onBaz: () => {} };
const Outer = sinon.spy(props => <Inner {...props} />);
const Inner = sinon.spy(props => <div {...props}>inner</div>);
render(<Outer {...PROPS} />, scratch);
expect(Outer)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(PROPS)
.and.to.have.returned(
sinon.match({
type: Inner,
props: PROPS
})
);
expect(Inner)
.to.have.been.calledOnce.and.to.have.been.calledWithMatch(PROPS)
.and.to.have.returned(
sinon.match({
type: 'div',
props: { ...PROPS, children: 'inner' }
})
);
expect(scratch.innerHTML).to.equal('<div foo="bar">inner</div>');
});
it('should re-render nested functional components', () => {
let doRender = null;
class Outer extends Component {
componentDidMount() {
let i = 1;
doRender = () => this.setState({ i: ++i });
}
componentWillUnmount() {}
render(props, { i }) {
return <Inner i={i} {...props} />;
}
}
sinon.spy(Outer.prototype, 'render');
sinon.spy(Outer.prototype, 'componentWillUnmount');
let j = 0;
const Inner = sinon.spy(props => (
<div j={++j} {...props}>
inner
</div>
));
render(<Outer foo="bar" />, scratch);
// update & flush
doRender();
rerender();
expect(Outer.prototype.componentWillUnmount).not.to.have.been.called;
expect(Inner).to.have.been.calledTwice;
expect(Inner.secondCall)
.to.have.been.calledWithMatch({ foo: 'bar', i: 2 })
.and.to.have.returned(
sinon.match({
props: {
j: 2,
i: 2,
foo: 'bar'
}
})
);
expect(getAttributes(scratch.firstElementChild)).to.eql({
j: '2',
i: '2',
foo: 'bar'
});
// update & flush
doRender();
rerender();
expect(Inner).to.have.been.calledThrice;
expect(Inner.thirdCall)
.to.have.been.calledWithMatch({ foo: 'bar', i: 3 })
.and.to.have.returned(
sinon.match({
props: {
j: 3,
i: 3,
foo: 'bar'
}
})
);
expect(getAttributes(scratch.firstElementChild)).to.eql({
j: '3',
i: '3',
foo: 'bar'
});
});
it('should re-render nested components', () => {
let doRender = null,
alt = false;
class Outer extends Component {
componentDidMount() {
let i = 1;
doRender = () => this.setState({ i: ++i });
}
componentWillUnmount() {}
render(props, { i }) {
if (alt) return <div is-alt />;
return <Inner i={i} {...props} />;
}
}
sinon.spy(Outer.prototype, 'render');
sinon.spy(Outer.prototype, 'componentDidMount');
sinon.spy(Outer.prototype, 'componentWillUnmount');
let j = 0;
class Inner extends Component {
constructor(...args) {
super();
}
componentWillMount() {}
componentDidMount() {}
componentWillUnmount() {}
render(props) {
return (
<div j={++j} {...props}>
inner
</div>
);
}
}
sinon.spy(Inner.prototype, 'render');
sinon.spy(Inner.prototype, 'componentWillMount');
sinon.spy(Inner.prototype, 'componentDidMount');
sinon.spy(Inner.prototype, 'componentWillUnmount');
render(<Outer foo="bar" />, scratch);
expect(Outer.prototype.componentDidMount).to.have.been.calledOnce;
// update & flush
doRender();
rerender();
expect(Outer.prototype.componentWillUnmount).not.to.have.been.called;
expect(Inner.prototype.componentWillUnmount).not.to.have.been.called;
expect(Inner.prototype.componentWillMount).to.have.been.calledOnce;
expect(Inner.prototype.componentDidMount).to.have.been.calledOnce;
expect(Inner.prototype.render).to.have.been.calledTwice;
expect(Inner.prototype.render.secondCall)
.to.have.been.calledWithMatch({ foo: 'bar', i: 2 })
.and.to.have.returned(
sinon.match({
props: {
j: 2,
i: 2,
foo: 'bar'
}
})
);
expect(getAttributes(scratch.firstElementChild)).to.eql({
j: '2',
i: '2',
foo: 'bar'
});
expect(serializeHtml(scratch)).to.equal(
sortAttributes('<div foo="bar" j="2" i="2">inner</div>')
);
// update & flush
doRender();
rerender();
expect(Inner.prototype.componentWillUnmount).not.to.have.been.called;
expect(Inner.prototype.componentWillMount).to.have.been.calledOnce;
expect(Inner.prototype.componentDidMount).to.have.been.calledOnce;
expect(Inner.prototype.render).to.have.been.calledThrice;
expect(Inner.prototype.render.thirdCall)
.to.have.been.calledWithMatch({ foo: 'bar', i: 3 })
.and.to.have.returned(
sinon.match({
props: {
j: 3,
i: 3,
foo: 'bar'
}
})
);
expect(getAttributes(scratch.firstElementChild)).to.eql({
j: '3',
i: '3',
foo: 'bar'
});
// update & flush
alt = true;
doRender();
rerender();
expect(Inner.prototype.componentWillUnmount).to.have.been.calledOnce;
expect(scratch.innerHTML).to.equal('<div is-alt="true"></div>');
// update & flush
alt = false;
doRender();
rerender();
expect(serializeHtml(scratch)).to.equal(
sortAttributes('<div foo="bar" j="4" i="5">inner</div>')
);
});
it('should resolve intermediary functional component', () => {
let ctx = {};
class Root extends Component {
getChildContext() {
return { ctx };
}
render() {
return <Func />;
}
}
const Func = () => <Inner />;
class Inner extends Component {
componentWillMount() {}
componentDidMount() {}
componentWillUnmount() {}
render() {
return <div>inner</div>;
}
}
spyAll(Inner.prototype);
render(<Root />, scratch);
expect(Inner.prototype.componentWillMount).to.have.been.calledOnce;
expect(Inner.prototype.componentDidMount).to.have.been.calledOnce;
expect(Inner.prototype.componentWillMount).to.have.been.calledBefore(
Inner.prototype.componentDidMount
);
render(<asdf />, scratch);
expect(Inner.prototype.componentWillUnmount).to.have.been.calledOnce;
});
it('should unmount children of high-order components without unmounting parent', () => {
let outer,
inner2,
counter = 0;
class Outer extends Component {
constructor(props, context) {
super(props, context);
outer = this;
this.state = {
child: this.props.child
};
}
componentWillUnmount() {}
componentWillMount() {}
componentDidMount() {}
render(_, { child: C }) {
return <C />;
}
}
spyAll(Outer.prototype);
class Inner extends Component {
componentWillUnmount() {}
componentWillMount() {}
componentDidMount() {}
render() {
return h('element' + ++counter);
}
}
spyAll(Inner.prototype);
class Inner2 extends Component {
constructor(props, context) {
super(props, context);
inner2 = this;
}
componentWillUnmount() {}
componentWillMount() {}
componentDidMount() {}
render() {
return h('element' + ++counter);
}
}
spyAll(Inner2.prototype);
render(<Outer child={Inner} />, scratch);
// outer should only have been mounted once
expect(Outer.prototype.componentWillMount, 'outer initial').to.have.been
.calledOnce;
expect(Outer.prototype.componentDidMount, 'outer initial').to.have.been
.calledOnce;
expect(Outer.prototype.componentWillUnmount, 'outer initial').not.to.have
.been.called;
// inner should only have been mounted once
expect(Inner.prototype.componentWillMount, 'inner initial').to.have.been
.calledOnce;
expect(Inner.prototype.componentDidMount, 'inner initial').to.have.been
.calledOnce;
expect(Inner.prototype.componentWillUnmount, 'inner initial').not.to.have
.been.called;
outer.setState({ child: Inner2 });
outer.forceUpdate();
rerender();
expect(Inner2.prototype.render).to.have.been.calledOnce;
// outer should still only have been mounted once
expect(Outer.prototype.componentWillMount, 'outer swap').to.have.been
.calledOnce;
expect(Outer.prototype.componentDidMount, 'outer swap').to.have.been
.calledOnce;
expect(Outer.prototype.componentWillUnmount, 'outer swap').not.to.have
.been.called;
// inner should only have been mounted once
expect(Inner2.prototype.componentWillMount, 'inner2 swap').to.have.been
.calledOnce;
expect(Inner2.prototype.componentDidMount, 'inner2 swap').to.have.been
.calledOnce;
expect(Inner2.prototype.componentWillUnmount, 'inner2 swap').not.to.have
.been.called;
inner2.forceUpdate();
rerender();
expect(Inner2.prototype.render, 'inner2 update').to.have.been.calledTwice;
expect(Inner2.prototype.componentWillMount, 'inner2 update').to.have.been
.calledOnce;
expect(Inner2.prototype.componentDidMount, 'inner2 update').to.have.been
.calledOnce;
expect(Inner2.prototype.componentWillUnmount, 'inner2 update').not.to.have
.been.called;
});
it('should remount when swapping between HOC child types', () => {
class Outer extends Component {
render({ child: Child }) {
return <Child />;
}
}
class Inner extends Component {
componentWillMount() {}
componentWillUnmount() {}
render() {
return <div class="inner">foo</div>;
}
}
spyAll(Inner.prototype);
const InnerFunc = () => <div class="inner-func">bar</div>;
render(<Outer child={Inner} />, scratch);
expect(Inner.prototype.componentWillMount, 'initial mount').to.have.been
.calledOnce;
expect(Inner.prototype.componentWillUnmount, 'initial mount').not.to.have
.been.called;
Inner.prototype.componentWillMount.resetHistory();
render(<Outer child={InnerFunc} />, scratch);
expect(Inner.prototype.componentWillMount, 'unmount').not.to.have.been
.called;
expect(Inner.prototype.componentWillUnmount, 'unmount').to.have.been
.calledOnce;
Inner.prototype.componentWillUnmount.resetHistory();
render(<Outer child={Inner} />, scratch);
expect(Inner.prototype.componentWillMount, 'remount').to.have.been
.calledOnce;
expect(Inner.prototype.componentWillUnmount, 'remount').not.to.have.been
.called;
});
});
describe('Component Nesting', () => {
let useIntermediary = false;
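// createComponent builds a spied class component that renders its children,
// either directly or wrapped in an intermediary: the bound functional
// component when useIntermediary is true, or whatever useIntermediary is
// set to (e.g. the tag name 'div').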
let createComponent = Intermediary => {
class C extends Component {
componentWillMount() {}
render({ children }) {
if (!useIntermediary) return children;
let I = useIntermediary === true ? Intermediary : useIntermediary;
return <I>{children}</I>;
}
}
spyAll(C.prototype);
return C;
};
let createFunction = () => sinon.spy(({ children }) => children);
let F1 = createFunction();
let F2 = createFunction();
let F3 = createFunction();
let C1 = createComponent(F1);
let C2 = createComponent(F2);
let C3 = createComponent(F3);
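// reset clears the call history of every spy above: the three functional
// intermediaries plus each spied prototype method of C1, C2 and C3.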
let reset = () =>
[C1, C2, C3]
.reduce(
(acc, c) =>
acc.concat(Object.keys(c.prototype).map(key => c.prototype[key])),
[F1, F2, F3]
)
.forEach(c => c.resetHistory());
it('should handle lifecycle for no intermediary in component tree', () => {
reset();
render(
<C1>
<C2>
<C3>Some Text</C3>
</C2>
</C1>,
scratch
);
expect(C1.prototype.componentWillMount, 'initial mount').to.have.been
.calledOnce;
expect(C2.prototype.componentWillMount, 'initial mount').to.have.been
.calledOnce;
expect(C3.prototype.componentWillMount, 'initial mount').to.have.been
.calledOnce;
reset();
render(
<C1>
<C2>Some Text</C2>
</C1>,
scratch
);
expect(C1.prototype.componentWillMount, 'unmount innermost, C1').not.to
.have.been.called;
expect(C2.prototype.componentWillMount, 'unmount innermost, C2').not.to
.have.been.called;
reset();
render(
<C1>
<C3>Some Text</C3>
</C1>,
scratch
);
expect(C1.prototype.componentWillMount, 'swap innermost').not.to.have.been
.called;
expect(C3.prototype.componentWillMount, 'swap innermost').to.have.been
.calledOnce;
reset();
render(
<C1>
<C2>
<C3>Some Text</C3>
</C2>
</C1>,
scratch
);
expect(C1.prototype.componentWillMount, 'inject between, C1').not.to.have
.been.called;
expect(C2.prototype.componentWillMount, 'inject between, C2').to.have.been
.calledOnce;
expect(C3.prototype.componentWillMount, 'inject between, C3').to.have.been
.calledOnce;
});
it('should handle lifecycle for nested intermediary functional components', () => {
useIntermediary = true;
render(<div />, scratch);
reset();
render(
<C1>
<C2>
<C3>Some Text</C3>
</C2>
</C1>,
scratch
);
expect(
C1.prototype.componentWillMount,
'initial mount w/ intermediary fn, C1'
).to.have.been.calledOnce;
expect(
C2.prototype.componentWillMount,
'initial mount w/ intermediary fn, C2'
).to.have.been.calledOnce;
expect(
C3.prototype.componentWillMount,
'initial mount w/ intermediary fn, C3'
).to.have.been.calledOnce;
reset();
render(
<C1>
<C2>Some Text</C2>
</C1>,
scratch
);
expect(
C1.prototype.componentWillMount,
'unmount innermost w/ intermediary fn, C1'
).not.to.have.been.called;
expect(
C2.prototype.componentWillMount,
'unmount innermost w/ intermediary fn, C2'
).not.to.have.been.called;
reset();
render(
<C1>
<C3>Some Text</C3>
</C1>,
scratch
);
expect(
C1.prototype.componentWillMount,
'swap innermost w/ intermediary fn'
).not.to.have.been.called;
expect(
C3.prototype.componentWillMount,
'swap innermost w/ intermediary fn'
).to.have.been.calledOnce;
reset();
render(
<C1>
<C2>
<C3>Some Text</C3>
</C2>
</C1>,
scratch
);
expect(
C1.prototype.componentWillMount,
'inject between, C1 w/ intermediary fn'
).not.to.have.been.called;
expect(
C2.prototype.componentWillMount,
'inject between, C2 w/ intermediary fn'
).to.have.been.calledOnce;
expect(
C3.prototype.componentWillMount,
'inject between, C3 w/ intermediary fn'
).to.have.been.calledOnce;
});
it('should render components by depth', () => {
let spy = sinon.spy();
let update;
class Child extends Component {
constructor(props) {
super(props);
update = () => {
this.props.update();
this.setState({});
};
}
render() {
spy();
let items = [];
for (let i = 0; i < this.props.items; i++) items.push(i);
return <div>{items.join(',')}</div>;
}
}
let i = 0;
class Parent extends Component {
render() {
return <Child items={++i} update={() => this.setState({})} />;
}
}
render(<Parent />, scratch);
expect(spy).to.be.calledOnce;
update();
rerender();
expect(spy).to.be.calledTwice;
});
it('should handle lifecycle for nested intermediary elements', () => {
useIntermediary = 'div';
render(<div />, scratch);
reset();
render(
<C1>
<C2>
<C3>Some Text</C3>
</C2>
</C1>,
scratch
);
expect(
C1.prototype.componentWillMount,
'initial mount w/ intermediary div, C1'
).to.have.been.calledOnce;
expect(
C2.prototype.componentWillMount,
'initial mount w/ intermediary div, C2'
).to.have.been.calledOnce;
expect(
C3.prototype.componentWillMount,
'initial mount w/ intermediary div, C3'
).to.have.been.calledOnce;
reset();
render(
<C1>
<C2>Some Text</C2>
</C1>,
scratch
);
expect(
C1.prototype.componentWillMount,
'unmount innermost w/ intermediary div, C1'
).not.to.have.been.called;
expect(
C2.prototype.componentWillMount,
'unmount innermost w/ intermediary div, C2'
).not.to.have.been.called;
reset();
render(
<C1>
<C3>Some Text</C3>
</C1>,
scratch
);
expect(
C1.prototype.componentWillMount,
'swap innermost w/ intermediary div'
).not.to.have.been.called;
expect(
C3.prototype.componentWillMount,
'swap innermost w/ intermediary div'
).to.have.been.calledOnce;
reset();
render(
<C1>
<C2>
<C3>Some Text</C3>
</C2>
</C1>,
scratch
);
expect(
C1.prototype.componentWillMount,
'inject between, C1 w/ intermediary div'
).not.to.have.been.called;
expect(
C2.prototype.componentWillMount,
'inject between, C2 w/ intermediary div'
).to.have.been.calledOnce;
expect(
C3.prototype.componentWillMount,
'inject between, C3 w/ intermediary div'
).to.have.been.calledOnce;
});
});
it('should set component._vnode._dom when sCU returns false', () => {
let parent;
class Parent extends Component {
render() {
parent = this;
return <Child />;
}
}
let condition = false;
let child;
class Child extends Component {
shouldComponentUpdate() {
return false;
}
render() {
child = this;
if (!condition) return null;
return <div class="child" />;
}
}
let app;
class App extends Component {
render() {
app = this;
return <Parent />;
}
}
render(<App />, scratch);
expect(child._vnode._dom).to.equalNode(child.base);
app.forceUpdate();
expect(child._vnode._dom).to.equalNode(child.base);
parent.setState({});
condition = true;
child.forceUpdate();
expect(child._vnode._dom).to.equalNode(child.base);
rerender();
expect(child._vnode._dom).to.equalNode(child.base);
condition = false;
app.setState({});
child.forceUpdate();
rerender();
expect(child._vnode._dom).to.equalNode(child.base);
});
it('should update old dom on forceUpdate in a lifecycle', () => {
let i = 0;
class App extends Component {
componentWillReceiveProps() {
this.forceUpdate();
}
render() {
if (i++ == 0) return <div>foo</div>;
return <div>bar</div>;
}
}
render(<App />, scratch);
render(<App />, scratch);
expect(scratch.innerHTML).to.equal('<div>bar</div>');
});
// preact/#1323
it('should handle hoisted component vnodes without DOM', () => {
let x = 0;
let mounted = '';
let unmounted = '';
let updateAppState;
class X extends Component {
constructor(props) {
super(props);
this.name = `${x++}`;
}
componentDidMount() {
mounted += `,${this.name}`;
}
componentWillUnmount() {
unmounted += `,${this.name}`;
}
render() {
return null;
}
}
// Statically create X element
const A = <X />;
class App extends Component {
constructor(props) {
super(props);
this.state = { i: 0 };
updateAppState = () => this.setState({ i: this.state.i + 1 });
}
render() {
return (
<div key={this.state.i}>
{A}
{A}
</div>
);
}
}
render(<App />, scratch);
updateAppState();
rerender();
updateAppState();
rerender();
expect(mounted).to.equal(',0,1,2,3,4,5');
expect(unmounted).to.equal(',0,1,2,3');
});
describe('c.base', () => {
/* eslint-disable lines-around-comment */
/** @type {import('../../src').Component} */
let parentDom1;
/** @type {import('../../src').Component} */
let parent1;
/** @type {import('../../src').Component} */
let parent2;
/** @type {import('../../src').Component} */
let maybe;
/** @type {import('../../src').Component} */
let child;
/** @type {import('../../src').Component} */
let sibling;
/** @type {import('../../src').Component} */
let nullInst;
/** @type {() => void} */
let toggleMaybeNull;
/** @type {() => void} */
let swapChildTag;
function ParentWithDom(props) {
parentDom1 = this;
return <div>{props.children}</div>;
}
class Parent1 extends Component {
render() {
parent1 = this;
return this.props.children;
}
}
function Parent2(props) {
parent2 = this;
return props.children;
}
class MaybeNull extends Component {
constructor(props) {
super(props);
maybe = this;
this.state = { active: props.active || false };
toggleMaybeNull = () =>
this.setState(prev => ({
active: !prev.active
}));
}
render() {
return this.state.active ? <div>maybe</div> : null;
}
}
class Child extends Component {
constructor(props) {
super(props);
child = this;
this.state = { tagName: 'p' };
swapChildTag = () =>
this.setState(prev => ({
tagName: prev.tagName == 'p' ? 'span' : 'p'
}));
}
render() {
return h(this.state.tagName, null, 'child');
}
}
function Sibling(props) {
sibling = this;
return <p />;
}
function Null() {
nullInst = this;
return null;
}
afterEach(() => {
parentDom1 = null;
parent1 = null;
parent2 = null;
child = null;
sibling = null;
});
it('should keep c.base up to date if a nested child component changes DOM nodes', () => {
render(
<ParentWithDom>
<Parent1>
<Parent2>
<Child />
</Parent2>
</Parent1>
</ParentWithDom>,
scratch
);
expect(scratch.innerHTML).to.equal('<div><p>child</p></div>');
expect(child.base).to.equalNode(scratch.firstChild.firstChild);
expect(parent2.base).to.equalNode(child.base);
expect(parent1.base).to.equalNode(child.base);
expect(parentDom1.base).to.equalNode(scratch.firstChild);
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal('<div><span>child</span></div>');
expect(child.base).to.equalNode(scratch.firstChild.firstChild);
expect(parent2.base).to.equalNode(child.base);
expect(parent1.base).to.equalNode(child.base);
expect(parentDom1.base).to.equalNode(scratch.firstChild);
});
it('should not update sibling c.base if child component changes DOM nodes', () => {
let s1 = {},
s2 = {},
s3 = {},
s4 = {};
render(
<Fragment>
<ParentWithDom>
<Parent1>
<Parent2>
<Child />
<Sibling ref={s1} />
</Parent2>
<Sibling ref={s2} />
</Parent1>
<Sibling ref={s3} />
</ParentWithDom>
<Sibling ref={s4} />
</Fragment>,
scratch
);
expect(scratch.innerHTML).to.equal(
'<div><p>child</p><p></p><p></p><p></p></div><p></p>'
);
expect(child.base).to.equalNode(scratch.firstChild.firstChild);
expect(parent2.base).to.equalNode(child.base);
expect(parent1.base).to.equalNode(child.base);
expect(parentDom1.base).to.equalNode(scratch.firstChild);
expect(s1.current.base).to.equalNode(scratch.firstChild.childNodes[1]);
expect(s2.current.base).to.equalNode(scratch.firstChild.childNodes[2]);
expect(s3.current.base).to.equalNode(scratch.firstChild.childNodes[3]);
expect(s4.current.base).to.equalNode(scratch.lastChild);
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal(
'<div><span>child</span><p></p><p></p><p></p></div><p></p>'
);
expect(child.base).to.equalNode(scratch.firstChild.firstChild);
expect(parent2.base).to.equalNode(child.base);
expect(parent1.base).to.equalNode(child.base);
expect(parentDom1.base).to.equalNode(scratch.firstChild);
expect(s1.current.base).to.equalNode(scratch.firstChild.childNodes[1]);
expect(s2.current.base).to.equalNode(scratch.firstChild.childNodes[2]);
expect(s3.current.base).to.equalNode(scratch.firstChild.childNodes[3]);
expect(s4.current.base).to.equalNode(scratch.lastChild);
});
it('should not update parent c.base if child component changes DOM nodes and it is not first child component', () => {
render(
<Parent1>
<Sibling />
<Child />
</Parent1>,
scratch
);
expect(scratch.innerHTML).to.equal('<p></p><p>child</p>');
expect(child.base).to.equalNode(scratch.lastChild);
expect(sibling.base).to.equalNode(scratch.firstChild);
expect(parent1.base).to.equalNode(sibling.base);
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal('<p></p><span>child</span>');
expect(child.base).to.equalNode(scratch.lastChild);
expect(sibling.base).to.equalNode(scratch.firstChild);
expect(parent1.base).to.equalNode(sibling.base);
});
it('should update parent c.base if child component changes DOM nodes and it is first non-null child component', () => {
render(
<Parent1>
<Null />
<Child />
<Sibling />
</Parent1>,
scratch
);
expect(scratch.innerHTML).to.equal('<p>child</p><p></p>');
expect(nullInst.base).to.equalNode(null);
expect(child.base).to.equalNode(scratch.firstChild);
expect(sibling.base).to.equalNode(scratch.lastChild);
expect(parent1.base).to.equalNode(child.base);
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal('<span>child</span><p></p>');
expect(nullInst.base).to.equalNode(null);
expect(child.base).to.equalNode(scratch.firstChild);
expect(sibling.base).to.equalNode(scratch.lastChild);
expect(parent1.base).to.equalNode(child.base);
});
it('should not update parent c.base if child component changes DOM nodes and a parent is not first child component', () => {
render(
<ParentWithDom>
<Parent1>
<Sibling />
<Parent2>
<Child />
</Parent2>
</Parent1>
</ParentWithDom>,
scratch
);
expect(scratch.innerHTML).to.equal('<div><p></p><p>child</p></div>');
expect(child.base).to.equalNode(scratch.firstChild.lastChild);
expect(parent2.base).to.equalNode(child.base);
expect(sibling.base).to.equalNode(scratch.firstChild.firstChild);
expect(parent1.base).to.equalNode(sibling.base);
expect(parentDom1.base).to.equalNode(scratch.firstChild);
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal(
'<div><p></p><span>child</span></div>'
);
expect(child.base).to.equalNode(scratch.firstChild.lastChild);
expect(parent2.base).to.equalNode(child.base);
expect(sibling.base).to.equalNode(scratch.firstChild.firstChild);
expect(parent1.base).to.equalNode(sibling.base);
expect(parentDom1.base).to.equalNode(scratch.firstChild);
});
it('should update parent c.base if first child becomes null', () => {
render(
<Parent1>
<MaybeNull active />
<Parent2>
<Child />
</Parent2>
</Parent1>,
scratch
);
expect(scratch.innerHTML).to.equal([div('maybe'), p('child')].join(''));
expect(maybe.base).to.equalNode(
scratch.firstChild,
'initial - maybe.base'
);
expect(child.base).to.equalNode(
scratch.lastChild,
'initial - child.base'
);
expect(parent2.base).to.equalNode(child.base, 'initial - parent2.base');
expect(parent1.base).to.equalNode(maybe.base, 'initial - parent1.base');
toggleMaybeNull();
rerender();
expect(scratch.innerHTML).to.equal([p('child')].join(''));
expect(maybe.base).to.equalNode(null, 'toggleMaybe - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'toggleMaybe - child.base'
);
expect(parent2.base).to.equalNode(
child.base,
'toggleMaybe - parent2.base'
);
expect(parent1.base).to.equalNode(
child.base,
'toggleMaybe - parent1.base'
);
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal([span('child')].join(''));
expect(maybe.base).to.equalNode(null, 'swapChildTag - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'swapChildTag - child.base'
);
expect(parent2.base).to.equalNode(
child.base,
'swapChildTag - parent2.base'
);
expect(parent1.base).to.equalNode(
child.base,
'swapChildTag - parent1.base'
);
});
it('should update parent c.base if first child becomes non-null', () => {
render(
<Parent1>
<MaybeNull />
<Parent2>
<Child />
</Parent2>
</Parent1>,
scratch
);
expect(scratch.innerHTML).to.equal([p('child')].join(''));
expect(maybe.base).to.equalNode(null, 'initial - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'initial - child.base'
);
expect(parent2.base).to.equalNode(child.base, 'initial - parent2.base');
expect(parent1.base).to.equalNode(child.base, 'initial - parent1.base');
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal([span('child')].join(''));
expect(maybe.base).to.equalNode(null, 'swapChildTag - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'swapChildTag - child.base'
);
expect(parent2.base).to.equalNode(
child.base,
'swapChildTag - parent2.base'
);
expect(parent1.base).to.equalNode(
child.base,
'swapChildTag - parent1.base'
);
toggleMaybeNull();
rerender();
expect(scratch.innerHTML).to.equal(
[div('maybe'), span('child')].join('')
);
expect(maybe.base).to.equalNode(
scratch.firstChild,
'toggleMaybe - maybe.base'
);
expect(child.base).to.equalNode(
scratch.lastChild,
'toggleMaybe - child.base'
);
expect(parent2.base).to.equalNode(
child.base,
'toggleMaybe - parent2.base'
);
expect(parent1.base).to.equalNode(
maybe.base,
'toggleMaybe - parent1.base'
);
});
it('should update parent c.base if first non-null child becomes null with multiple null siblings', () => {
render(
<Parent1>
<Null />
<Null />
<Parent2>
<MaybeNull active />
<Child />
</Parent2>
</Parent1>,
scratch
);
expect(scratch.innerHTML).to.equal([div('maybe'), p('child')].join(''));
expect(maybe.base).to.equalNode(
scratch.firstChild,
'initial - maybe.base'
);
expect(child.base).to.equalNode(
scratch.lastChild,
'initial - child.base'
);
expect(parent2.base).to.equalNode(maybe.base, 'initial - parent2.base');
expect(parent1.base).to.equalNode(maybe.base, 'initial - parent1.base');
toggleMaybeNull();
rerender();
expect(scratch.innerHTML).to.equal([p('child')].join(''));
expect(maybe.base).to.equalNode(null, 'toggleMaybe - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'toggleMaybe - child.base'
);
expect(parent2.base).to.equalNode(
child.base,
'toggleMaybe - parent2.base'
);
expect(parent1.base).to.equalNode(
child.base,
'toggleMaybe - parent1.base'
);
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal([span('child')].join(''));
expect(maybe.base).to.equalNode(null, 'swapChildTag - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'swapChildTag - child.base'
);
expect(parent2.base).to.equalNode(
child.base,
'swapChildTag - parent2.base'
);
expect(parent1.base).to.equalNode(
child.base,
'swapChildTag - parent1.base'
);
});
it('should update parent c.base if a null child returns DOM with multiple null siblings', () => {
render(
<Parent1>
<Null />
<Null />
<Parent2>
<MaybeNull />
<Child />
</Parent2>
</Parent1>,
scratch
);
expect(scratch.innerHTML).to.equal([p('child')].join(''));
expect(maybe.base).to.equalNode(null, 'initial - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'initial - child.base'
);
expect(parent2.base).to.equalNode(child.base, 'initial - parent2.base');
expect(parent1.base).to.equalNode(child.base, 'initial - parent1.base');
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal([span('child')].join(''));
expect(maybe.base).to.equalNode(null, 'swapChildTag - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'swapChildTag - child.base'
);
expect(parent2.base).to.equalNode(
child.base,
'swapChildTag - parent2.base'
);
expect(parent1.base).to.equalNode(
child.base,
'swapChildTag - parent1.base'
);
toggleMaybeNull();
rerender();
expect(scratch.innerHTML).to.equal(
[div('maybe'), span('child')].join('')
);
expect(maybe.base).to.equalNode(
scratch.firstChild,
'toggleMaybe - maybe.base'
);
expect(child.base).to.equalNode(
scratch.lastChild,
'toggleMaybe - child.base'
);
expect(parent2.base).to.equalNode(
maybe.base,
'toggleMaybe - parent2.base'
);
expect(parent1.base).to.equalNode(
maybe.base,
'toggleMaybe - parent1.base'
);
});
it('should update parent c.base to null if last child becomes null', () => {
let fragRef = {};
render(
<Fragment ref={fragRef}>
<Parent1>
<Null />
<Null />
<Parent2>
<MaybeNull active />
</Parent2>
<Null />
</Parent1>
<Child />
</Fragment>,
scratch
);
expect(scratch.innerHTML).to.equal([div('maybe'), p('child')].join(''));
expect(maybe.base).to.equalNode(
scratch.firstChild,
'initial - maybe.base'
);
expect(child.base).to.equalNode(
scratch.lastChild,
'initial - child.base'
);
expect(parent2.base).to.equalNode(maybe.base, 'initial - parent2.base');
expect(parent1.base).to.equalNode(maybe.base, 'initial - parent1.base');
expect(fragRef.current.base).to.equalNode(
maybe.base,
'initial - fragRef.current.base'
);
toggleMaybeNull();
rerender();
expect(scratch.innerHTML).to.equal([p('child')].join(''));
expect(maybe.base).to.equalNode(null, 'toggleMaybe - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'toggleMaybe - child.base'
);
expect(parent2.base).to.equalNode(
maybe.base,
'toggleMaybe - parent2.base'
);
expect(parent1.base).to.equalNode(
maybe.base,
'toggleMaybe - parent1.base'
);
expect(fragRef.current.base).to.equalNode(
child.base,
'toggleMaybe - fragRef.current.base'
);
});
it('should update parent c.base if last child returns dom', () => {
let fragRef = {};
render(
<Fragment ref={fragRef}>
<Parent1>
<Null />
<Null />
<Parent2>
<MaybeNull />
</Parent2>
<Null />
</Parent1>
<Child />
</Fragment>,
scratch
);
expect(scratch.innerHTML).to.equal([p('child')].join(''));
expect(maybe.base).to.equalNode(null, 'initial - maybe.base');
expect(child.base).to.equalNode(
scratch.firstChild,
'initial - child.base'
);
expect(parent2.base).to.equalNode(maybe.base, 'initial - parent2.base');
expect(parent1.base).to.equalNode(maybe.base, 'initial - parent1.base');
expect(fragRef.current.base).to.equalNode(
child.base,
'initial - fragRef.current.base'
);
toggleMaybeNull();
rerender();
expect(scratch.innerHTML).to.equal([div('maybe'), p('child')].join(''));
expect(maybe.base).to.equalNode(
scratch.firstChild,
'toggleMaybe - maybe.base'
);
expect(child.base).to.equalNode(
scratch.lastChild,
'toggleMaybe - child.base'
);
expect(parent2.base).to.equalNode(maybe.base, 'initial - parent2.base');
expect(parent1.base).to.equalNode(
maybe.base,
'toggleMaybe - parent1.base'
);
expect(fragRef.current.base).to.equalNode(
maybe.base,
'toggleMaybe - fragRef.current.base'
);
});
it('should not update parent if it is a DOM node', () => {
let divVNode = (
<div>
<Child />
</div>
);
render(divVNode, scratch);
expect(scratch.innerHTML).to.equal('<div><p>child</p></div>');
expect(divVNode._dom).to.equalNode(
scratch.firstChild,
'initial - divVNode._dom'
);
expect(child.base).to.equalNode(
scratch.firstChild.firstChild,
'initial - child.base'
);
swapChildTag();
rerender();
expect(scratch.innerHTML).to.equal('<div><span>child</span></div>');
expect(divVNode._dom).to.equalNode(
scratch.firstChild,
'swapChildTag - divVNode._dom'
);
expect(child.base).to.equalNode(
scratch.firstChild.firstChild,
'swapChildTag - child.base'
);
});
});
describe('setState', () => {
it('should not error if called on an unmounted component', () => {
/** @type {() => void} */
let increment;
class Foo extends Component {
constructor(props) {
super(props);
this.state = { count: 0 };
increment = () => this.setState({ count: this.state.count + 1 });
}
render(props, state) {
return <div>{state.count}</div>;
}
}
render(<Foo />, scratch);
expect(scratch.innerHTML).to.equal('<div>0</div>');
increment();
rerender();
expect(scratch.innerHTML).to.equal('<div>1</div>');
render(null, scratch);
expect(scratch.innerHTML).to.equal('');
expect(() => increment()).to.not.throw();
expect(() => rerender()).to.not.throw();
expect(scratch.innerHTML).to.equal('');
});
});
describe('forceUpdate', () => {
it('should not error if called on an unmounted component', () => {
/** @type {() => void} */
let forceUpdate;
class Foo extends Component {
constructor(props) {
super(props);
forceUpdate = () => this.forceUpdate();
}
render(props, state) {
return <div>Hello</div>;
}
}
render(<Foo />, scratch);
expect(scratch.innerHTML).to.equal('<div>Hello</div>');
render(null, scratch);
expect(scratch.innerHTML).to.equal('');
expect(() => forceUpdate()).to.not.throw();
expect(() => rerender()).to.not.throw();
expect(scratch.innerHTML).to.equal('');
});
});
});
| 1 | 14,726 | Removed this copy of the `spyAll` function and replaced it with the same function declared in `helpers.js`. Same for `sortAttributes` below | preactjs-preact | js |
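For reference, a minimal sketch of the kind of helper this review points to (the exact helpers.js implementation is assumed, not quoted): spyAll wraps every own method of an object in a sinon spy so lifecycle calls can be asserted, and sortAttributes normalizes attribute order in serialized HTML before comparison.
// Hypothetical sketch, assuming sinon is in scope as elsewhere in the suite:
function spyAll(obj) {
Object.keys(obj).forEach(key => {
if (typeof obj[key] === 'function') sinon.spy(obj, key);
});
}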
@@ -90,9 +90,9 @@ public class KubernetesContainerizedImpl extends EventHandler implements Contain
public static final String DEFAULT_POD_NAME_PREFIX = "fc-dep";
public static final String DEFAULT_SERVICE_NAME_PREFIX = "fc-svc";
public static final String DEFAULT_CLUSTER_NAME = "azkaban";
- public static final String CPU_LIMIT = "4";
+ public static final String DEFAULT_MAX_CPU = "0";
+ public static final String DEFAULT_MAX_MEMORY = "0Gi";
public static final String DEFAULT_CPU_REQUEST = "1";
- public static final String MEMORY_LIMIT = "64Gi";
public static final String DEFAULT_MEMORY_REQUEST = "2Gi";
public static final String MAPPING = "Mapping";
public static final String SERVICE_API_VERSION_2 = "ambassador/v2"; | 1 | /*
* Copyright 2020 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor.container;
import static azkaban.executor.ExecutionControllerUtils.clusterQualifiedExecId;
import static java.util.Objects.requireNonNull;
import azkaban.Constants;
import azkaban.Constants.ConfigurationKeys;
import azkaban.Constants.ContainerizedDispatchManagerProperties;
import azkaban.Constants.FlowParameters;
import azkaban.container.models.AzKubernetesV1PodBuilder;
import azkaban.container.models.AzKubernetesV1PodTemplate;
import azkaban.container.models.AzKubernetesV1ServiceBuilder;
import azkaban.container.models.AzKubernetesV1SpecBuilder;
import azkaban.container.models.ImagePullPolicy;
import azkaban.container.models.InitContainerType;
import azkaban.container.models.PodTemplateMergeUtils;
import azkaban.event.Event;
import azkaban.event.EventData;
import azkaban.event.EventHandler;
import azkaban.event.EventListener;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.Status;
import azkaban.executor.container.watch.KubernetesWatch;
import azkaban.imagemgmt.models.ImageVersion.State;
import azkaban.imagemgmt.rampup.ImageRampupManager;
import azkaban.imagemgmt.version.VersionInfo;
import azkaban.imagemgmt.version.VersionSet;
import azkaban.imagemgmt.version.VersionSetBuilder;
import azkaban.imagemgmt.version.VersionSetLoader;
import azkaban.metrics.ContainerizationMetrics;
import azkaban.project.ProjectLoader;
import azkaban.spi.EventType;
import azkaban.utils.Props;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import io.kubernetes.client.openapi.ApiClient;
import io.kubernetes.client.openapi.ApiException;
import io.kubernetes.client.openapi.apis.CoreV1Api;
import io.kubernetes.client.openapi.models.V1DeleteOptions;
import io.kubernetes.client.openapi.models.V1Pod;
import io.kubernetes.client.openapi.models.V1PodSpec;
import io.kubernetes.client.openapi.models.V1Service;
import io.kubernetes.client.openapi.models.V1Status;
import io.kubernetes.client.util.ClientBuilder;
import io.kubernetes.client.util.KubeConfig;
import io.kubernetes.client.util.Yaml;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is the Kubernetes-based implementation for containerization. It implements
* creation/deletion of pods and services. For any execution, it identifies the version set and
* creates a pod covering all the valid jobTypes of the flow.
*/
@Singleton
public class KubernetesContainerizedImpl extends EventHandler implements ContainerizedImpl {
public static final String DEFAULT_FLOW_CONTAINER_NAME_PREFIX = "az-flow-container";
public static final String DEFAULT_POD_NAME_PREFIX = "fc-dep";
public static final String DEFAULT_SERVICE_NAME_PREFIX = "fc-svc";
public static final String DEFAULT_CLUSTER_NAME = "azkaban";
public static final String CPU_LIMIT = "4";
public static final String DEFAULT_CPU_REQUEST = "1";
public static final String MEMORY_LIMIT = "64Gi";
public static final String DEFAULT_MEMORY_REQUEST = "2Gi";
public static final String MAPPING = "Mapping";
public static final String SERVICE_API_VERSION_2 = "ambassador/v2";
public static final String DEFAULT_INIT_MOUNT_PATH_PREFIX_FOR_JOBTYPES = "/data/jobtypes";
public static final String DEFAULT_APP_MOUNT_PATH_PREFIX_FOR_JOBTYPES =
"/export/apps/azkaban/azkaban-exec-server/current/plugins/jobtypes";
public static final String DEFAULT_INIT_MOUNT_PATH_PREFIX_FOR_DEPENDENCIES = "/data/dependencies";
public static final String DEFAULT_APP_MOUNT_PATH_PREFIX_FOR_DEPENDENCIES =
"/export/apps/azkaban/azkaban-exec-server/current/plugins/dependencies";
public static final String IMAGE = "image";
public static final String VERSION = "version";
public static final String DEFAULT_SECRET_NAME = "azkaban-k8s-secret";
public static final String DEFAULT_SECRET_VOLUME = DEFAULT_SECRET_NAME;
public static final String DEFAULT_SECRET_MOUNTPATH = "/var/azkaban/private";
public static final String SERVICE_SELECTOR_PREFIX = "flow";
public static final String POD_APPLICATION_TAG = "azkaban-exec-server";
public static final String CLUSTER_LABEL_NAME = "cluster";
public static final String APP_LABEL_NAME = "app";
public static final String EXECUTION_ID_LABEL_NAME = "execution-id";
public static final String EXECUTION_ID_LABEL_PREFIX = "execid-";
public static final String DISABLE_CLEANUP_LABEL_NAME = "cleanup-disabled";
public static final String DEFAULT_AZKABAN_BASE_IMAGE_NAME = "azkaban-base";
public static final String DEFAULT_AZKABAN_CONFIG_IMAGE_NAME = "azkaban-config";
private final String namespace;
private final ApiClient client;
private final CoreV1Api coreV1Api;
private final Props azkProps;
private final ExecutorLoader executorLoader;
private final String podPrefix;
private final String servicePrefix;
private final String clusterName;
private final String clusterEnv;
private final String flowContainerName;
private final String cpuLimit;
private final String cpuRequest;
private final String memoryLimit;
private final String memoryRequest;
private final int servicePort;
private final long serviceTimeout;
private final VersionSetLoader versionSetLoader;
private final ImageRampupManager imageRampupManager;
private final KubernetesWatch kubernetesWatch;
private final String initMountPathPrefixForJobtypes;
private final String appMountPathPrefixForJobtypes;
private final Set<String> dependencyTypes;
private final String initMountPathPrefixForDependencies;
private final String appMountPathPrefixForDependencies;
private static final Set<String> INCLUDED_JOB_TYPES = new TreeSet<>(
String.CASE_INSENSITIVE_ORDER);
private final String secretName;
private final String secretVolume;
private final String secretMountpath;
private final String podTemplatePath;
private final EventListener eventListener;
private final ContainerizationMetrics containerizationMetrics;
private final String azkabanBaseImageName;
private final String azkabanConfigImageName;
private final ProjectLoader projectLoader;
private static final Logger logger = LoggerFactory
.getLogger(KubernetesContainerizedImpl.class);
@Inject
public KubernetesContainerizedImpl(final Props azkProps,
final ExecutorLoader executorLoader,
final VersionSetLoader versionSetLoader,
final ImageRampupManager imageRampupManager,
final KubernetesWatch kubernetesWatch,
final EventListener eventListener,
final ContainerizationMetrics containerizationMetrics,
final ProjectLoader projectLoader)
throws ExecutorManagerException {
this.azkProps = azkProps;
this.executorLoader = executorLoader;
this.versionSetLoader = versionSetLoader;
this.imageRampupManager = imageRampupManager;
this.kubernetesWatch = kubernetesWatch;
this.eventListener = eventListener;
this.containerizationMetrics = containerizationMetrics;
this.projectLoader = projectLoader;
this.addListener(this.eventListener);
this.namespace = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_NAMESPACE);
this.flowContainerName =
this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_NAME
, DEFAULT_FLOW_CONTAINER_NAME_PREFIX);
this.podPrefix =
this.azkProps.getString(ContainerizedDispatchManagerProperties.KUBERNETES_POD_NAME_PREFIX,
DEFAULT_POD_NAME_PREFIX);
this.servicePrefix = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_NAME_PREFIX,
DEFAULT_SERVICE_NAME_PREFIX);
this.clusterName = this.azkProps.getString(ConfigurationKeys.AZKABAN_CLUSTER_NAME,
DEFAULT_CLUSTER_NAME);
// This is utilized to set AZ_CLUSTER ENV variable to the POD containers.
this.clusterEnv = this.azkProps.getString(ConfigurationKeys.AZKABAN_CLUSTER_ENV,
this.clusterName);
this.cpuLimit = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_CPU_LIMIT,
CPU_LIMIT);
this.cpuRequest = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_CPU_REQUEST,
DEFAULT_CPU_REQUEST);
this.memoryLimit = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_MEMORY_LIMIT,
MEMORY_LIMIT);
this.memoryRequest = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_MEMORY_REQUEST,
DEFAULT_MEMORY_REQUEST);
this.servicePort =
this.azkProps.getInt(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_PORT,
54343);
this.serviceTimeout =
this.azkProps
.getLong(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_CREATION_TIMEOUT_MS,
60000);
this.initMountPathPrefixForJobtypes =
this.azkProps
.getString(
ContainerizedDispatchManagerProperties.KUBERNETES_INIT_MOUNT_PATH_FOR_JOBTYPES,
DEFAULT_INIT_MOUNT_PATH_PREFIX_FOR_JOBTYPES);
this.appMountPathPrefixForJobtypes =
this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_MOUNT_PATH_FOR_JOBTYPES,
DEFAULT_APP_MOUNT_PATH_PREFIX_FOR_JOBTYPES);
this.dependencyTypes =
new TreeSet<>(this.azkProps
.getStringList(ContainerizedDispatchManagerProperties.KUBERNETES_DEPENDENCY_TYPES));
this.initMountPathPrefixForDependencies =
this.azkProps
.getString(
ContainerizedDispatchManagerProperties.KUBERNETES_INIT_MOUNT_PATH_FOR_DEPENDENCIES,
DEFAULT_INIT_MOUNT_PATH_PREFIX_FOR_DEPENDENCIES);
this.appMountPathPrefixForDependencies =
this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_MOUNT_PATH_FOR_DEPENDENCIES,
DEFAULT_APP_MOUNT_PATH_PREFIX_FOR_DEPENDENCIES);
this.secretName = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_SECRET_NAME,
DEFAULT_SECRET_NAME);
this.secretVolume = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_SECRET_VOLUME,
DEFAULT_SECRET_VOLUME);
this.secretMountpath = this.azkProps
.getString(
ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_SECRET_MOUNTPATH,
DEFAULT_SECRET_MOUNTPATH);
this.podTemplatePath = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_POD_TEMPLATE_PATH,
StringUtils.EMPTY);
this.azkabanBaseImageName = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_POD_AZKABAN_BASE_IMAGE_NAME,
DEFAULT_AZKABAN_BASE_IMAGE_NAME);
this.azkabanConfigImageName = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_POD_AZKABAN_CONFIG_IMAGE_NAME,
DEFAULT_AZKABAN_CONFIG_IMAGE_NAME);
try {
// Path to the configuration file for Kubernetes which contains information about
// Kubernetes API Server and identity for authentication
final String kubeConfigPath = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_KUBE_CONFIG_PATH);
logger.info("Kube config path is : {}", kubeConfigPath);
this.client =
ClientBuilder.kubeconfig(KubeConfig.loadKubeConfig(
Files.newBufferedReader(Paths.get(kubeConfigPath), Charset.defaultCharset())))
.build();
this.coreV1Api = new CoreV1Api(this.client);
} catch (final IOException exception) {
logger.error("Unable to read kube config file: {}", exception.getMessage());
throw new ExecutorManagerException(exception);
}
// Add all the job types that are readily available as part of azkaban base image.
this.addIncludedJobTypes();
}
/**
* Populate the included job types set with all the types that are readily available as part of
* azkaban base image.
*/
private void addIncludedJobTypes() {
INCLUDED_JOB_TYPES.add("hadoopJava");
INCLUDED_JOB_TYPES.add("hadoopShell");
INCLUDED_JOB_TYPES.add("hive");
INCLUDED_JOB_TYPES.add("java");
INCLUDED_JOB_TYPES.add("java2");
INCLUDED_JOB_TYPES.add("pig");
INCLUDED_JOB_TYPES.add("pigLi");
INCLUDED_JOB_TYPES.add("command");
INCLUDED_JOB_TYPES.add("javaprocess");
INCLUDED_JOB_TYPES.add("noop");
}
/**
* Check if the job type is contained in the included job types. If not, check whether the job
* type starts with any of the job types present in the included job type set. For example, a pig
* job type can carry a version suffix such as pigLi-0.11.1, which simply points to a different
* pig installation. Hence, only the prefix, i.e. pigLi, the actual job type name, is matched.
*
* @param jobType
* @return boolean
*/
private boolean isPresentInIncludedJobTypes(final String jobType) {
if (INCLUDED_JOB_TYPES.contains(jobType)) {
return true;
} else {
return isStartWithIncludedJobTypes(jobType);
}
}
/**
* Check if the job type starts with any of the job types present in the included job type set.
* For example, a pig job type can carry a version suffix such as pigLi-0.11.1, which simply
* points to a different pig installation. Hence, only the prefix, i.e. pigLi, the actual job
* type name, is matched.
*
* @param jobType
* @return boolean
*/
private boolean isStartWithIncludedJobTypes(final String jobType) {
for (final String includedJobType : INCLUDED_JOB_TYPES) {
if (jobType.toLowerCase().startsWith(includedJobType.toLowerCase())) {
return true;
}
}
return false;
}
/**
* Filter out the included job types from the given job types.
*
* @param jobTypes
* @return Set<String>
*/
private Set<String> filterIncludedJobTypes(final Set<String> jobTypes) {
return jobTypes.stream()
.filter(jobType -> !isPresentInIncludedJobTypes(jobType))
.collect(Collectors.toSet());
}
/**
* This method is used to create a container during dispatch of an execution. It will create a
* pod for the flow execution. It will also create a service for the pod if the
* azkaban.kubernetes.service.required property is set.
*
* @param executionId
* @throws ExecutorManagerException
*/
@Override
public void createContainer(final int executionId) throws ExecutorManagerException {
createPod(executionId);
if (isServiceRequired()) {
createService(executionId);
}
}
/**
* This method is used to delete the container. It will delete the pod for a flow execution. If a
* service was created, it will also delete the service. This method can be called as part of the
* cleanup process for containers in case a container didn't shut down gracefully.
*
* @param executionId
* @throws ExecutorManagerException
*/
@Override
public void deleteContainer(final int executionId) throws ExecutorManagerException {
deletePod(executionId);
if (isServiceRequired()) {
deleteService(executionId);
}
}
/**
* Construct the flow override parameter (key) for image version.
*
* @param imageType
* @return flow override param
*/
private String imageTypeOverrideParam(final String imageType) {
return String.join(".", IMAGE, imageType, VERSION);
}
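// Example: imageTypeOverrideParam("spark") returns "image.spark.version", the flow
// parameter users set to pin the image version for the "spark" image type.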
/**
* This method fetches the complete version set information (Map of jobs and their versions)
* required to run the flow.
*
* @param flowParams Set of flow properties and flow parameters
* @param imageTypesUsedInFlow
* @return VersionSet
* @throws ExecutorManagerException
*/
@VisibleForTesting
VersionSet fetchVersionSet(final int executionId, final Map<String, String> flowParams,
Set<String> imageTypesUsedInFlow, final ExecutableFlow executableFlow)
throws ExecutorManagerException {
VersionSet versionSet = null;
try {
if (flowParams != null &&
flowParams.containsKey(Constants.FlowParameters.FLOW_PARAM_VERSION_SET_ID)) {
final int versionSetId = Integer.parseInt(flowParams
.get(Constants.FlowParameters.FLOW_PARAM_VERSION_SET_ID));
try {
versionSet = this.versionSetLoader.getVersionSetById(versionSetId).get();
// Validate if the versionSet contains valid version. If not update the correct
// version using rampup and active image version information.
final Map<String, VersionInfo> updatedVersionInfoMap =
this.imageRampupManager.validateAndGetUpdatedVersionMap(executableFlow, versionSet);
if (!updatedVersionInfoMap.isEmpty()) {
// Rebuild version set with correct version
final VersionSetBuilder versionSetBuilder = new VersionSetBuilder(
this.versionSetLoader);
versionSetBuilder.addElements(updatedVersionInfoMap);
versionSet = versionSetBuilder.build();
}
/*
* Validate that all images part of the flow are included in the retrieved
* VersionSet. If there are images that were not part of the retrieved version
* set, then create a new VersionSet with a superset of all images.
*/
final Set<String> imageVersionsNotFound = new TreeSet<>();
final Map<String, VersionInfo> overlayMap = new HashMap<>();
for (final String imageType : imageTypesUsedInFlow) {
if (flowParams.containsKey(imageTypeOverrideParam(imageType))) {
// Fetches the user overridden version from the database and this will make sure if
// the overridden version exists/registered on Azkaban database. Hence, it follows a
// fail fast mechanism to throw exception if the version does not exist for the
// given image type.
final VersionInfo versionInfo = this.imageRampupManager.getVersionInfo(imageType,
flowParams.get(imageTypeOverrideParam(imageType)),
State.getNewAndActiveStateFilter());
overlayMap.put(imageType, versionInfo);
logger.info("User overridden image type {} of version {} is used", imageType,
versionInfo.getVersion());
} else if (!(isPresentInIncludedJobTypes(imageType) || versionSet.getVersion(imageType)
.isPresent())) {
logger.info("ExecId: {}, imageType: {} not found in versionSet {}",
executionId, imageType, versionSetId);
imageVersionsNotFound.add(imageType);
}
}
if (!(imageVersionsNotFound.isEmpty() && overlayMap.isEmpty())) {
// Populate a new Version Set
logger.info("ExecId: {}, Flow had more imageTypes than specified in versionSet {}. "
+ "Constructing a new one", executionId, versionSetId);
final VersionSetBuilder versionSetBuilder = new VersionSetBuilder(
this.versionSetLoader);
versionSetBuilder.addElements(versionSet.getImageToVersionMap());
// The following is a safety check. If getVersionByImageTypes below fails with an
// exception, we would otherwise be left with an incomplete/incorrect versionSet. Setting it
// to null ensures it is rebuilt from scratch in the following code block.
versionSet = null;
if (!imageVersionsNotFound.isEmpty()) {
versionSetBuilder.addElements(
this.imageRampupManager
.getVersionByImageTypes(executableFlow, imageVersionsNotFound,
overlayMap.keySet()));
}
if (!overlayMap.isEmpty()) {
versionSetBuilder.addElements(overlayMap);
}
versionSet = versionSetBuilder.build();
}
} catch (final Exception e) {
logger.error("ExecId: {}, Could not find version set id: {} as specified by flow params. "
+ "Will continue by creating a new one.", executionId, versionSetId, e);
}
}
if (versionSet == null) {
// Need to build a version set
// Filter all the job types available in azkaban base image from the input image types set
imageTypesUsedInFlow = this.filterIncludedJobTypes(imageTypesUsedInFlow);
// Now we will check the flow params for any override versions provided and apply them
final Map<String, VersionInfo> overlayMap = new HashMap<>();
for (final String imageType : imageTypesUsedInFlow) {
final String imageTypeVersionOverrideParam = imageTypeOverrideParam(imageType);
VersionInfo versionInfo;
if (flowParams != null && flowParams.containsKey(imageTypeVersionOverrideParam)) {
// Fetches the user overridden version from the database and this will make sure if
// the overridden version exists/registered on Azkaban database. Hence, it follows a
// fail fast mechanism to throw exception if the version does not exist for the
// given image type.
// Allow test version override if allow.test.version flow parameter is set to true
if (flowParams.containsKey(FlowParameters.FLOW_PARAM_ALLOW_IMAGE_TEST_VERSION) &&
Boolean.TRUE.equals(Boolean
.valueOf(flowParams.get(FlowParameters.FLOW_PARAM_ALLOW_IMAGE_TEST_VERSION)))) {
versionInfo = this.imageRampupManager.getVersionInfo(imageType,
flowParams.get(imageTypeVersionOverrideParam),
State.getNewActiveAndTestStateFilter());
overlayMap.put(imageType, versionInfo);
logger.info("User overridden image type {} of version {} is used", imageType,
versionInfo.getVersion());
} else {
versionInfo = this.imageRampupManager.getVersionInfo(imageType,
flowParams.get(imageTypeVersionOverrideParam),
State.getNewAndActiveStateFilter());
overlayMap.put(imageType, versionInfo);
logger.info("User overridden image type {} of version {} is used", imageType,
versionInfo.getVersion());
}
}
}
final Map<String, VersionInfo> versionMap =
this.imageRampupManager.getVersionByImageTypes(executableFlow, imageTypesUsedInFlow,
overlayMap.keySet());
final VersionSetBuilder versionSetBuilder = new VersionSetBuilder(this.versionSetLoader);
versionSetBuilder.addElements(versionMap);
versionSet = versionSetBuilder.addElements(overlayMap).build();
}
} catch (final IOException e) {
logger.error("ExecId: {}, Exception in fetching the VersionSet. Error msg: {}",
executionId, e.getMessage());
throw new ExecutorManagerException(e);
}
return versionSet;
}
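// Illustrative override behavior (the literal flow-parameter names live in Constants and
// are assumed here): setting "image.spark.version=1.2.3" pins the spark image to 1.2.3 if
// that version is registered as NEW or ACTIVE; when the allow-test-version flow parameter
// is true, TEST versions are accepted as well, per the state filters used above.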
/**
* @param executionId
* @param versionSet
* @param jobTypes
* @param dependencyTypes
* @return
* @throws ExecutorManagerException
*/
@VisibleForTesting
V1PodSpec createPodSpec(final int executionId, final VersionSet versionSet,
final SortedSet<String> jobTypes, final Set<String> dependencyTypes,
final Map<String, String> flowParam)
throws ExecutorManagerException {
// Gets azkaban base image full path containing version.
final String azkabanBaseImageFullPath = getAzkabanBaseImageFullPath(versionSet);
// TODO: check if we need full path for config as well.
final String azkabanConfigVersion = getAzkabanConfigVersion(versionSet);
// Get CPU and memory requested for a flow container
final String flowContainerCPURequest = getFlowContainerCPURequest(flowParam);
final String flowContainerMemoryRequest = getFlowContainerMemoryRequest(flowParam);
final AzKubernetesV1SpecBuilder v1SpecBuilder =
new AzKubernetesV1SpecBuilder(this.clusterEnv, Optional.empty())
.addFlowContainer(this.flowContainerName,
azkabanBaseImageFullPath, ImagePullPolicy.IF_NOT_PRESENT, azkabanConfigVersion)
.withResources(this.cpuLimit, flowContainerCPURequest, this.memoryLimit,
flowContainerMemoryRequest);
final Map<String, String> envVariables = new HashMap<>();
envVariables.put(ContainerizedDispatchManagerProperties.ENV_VERSION_SET_ID,
String.valueOf(versionSet.getVersionSetId()));
envVariables.put(ContainerizedDispatchManagerProperties.ENV_FLOW_EXECUTION_ID,
String.valueOf(executionId));
setupJavaRemoteDebug(envVariables, flowParam);
setupDevPod(envVariables, flowParam);
setupPodEnvVariables(envVariables, flowParam);
// Add env variables to spec builder
addEnvVariablesToSpecBuilder(v1SpecBuilder, envVariables);
// Create init container yaml file for each jobType and dependency
addInitContainers(executionId, jobTypes, dependencyTypes, v1SpecBuilder, versionSet);
// Add volume with secrets mounted
addSecretVolume(v1SpecBuilder);
return v1SpecBuilder.build();
}
/**
* This method is used to get the CPU request for a flow container. Precedence is defined below:
* a) use the CPU request set in the flow parameter; b) otherwise use the CPU request set in
* system properties, or the default held in @cpuRequest.
*
* @param flowParam
* @return CPU request for a flow container
*/
@VisibleForTesting
String getFlowContainerCPURequest(final Map<String, String> flowParam) {
if (flowParam != null && !flowParam.isEmpty() && flowParam
.containsKey(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_CPU_REQUEST)) {
return flowParam.get(Constants.FlowParameters.FLOW_PARAM_FLOW_CONTAINER_CPU_REQUEST);
}
return this.cpuRequest;
}
/**
* This method is used to get the memory request for a flow container. Precedence is defined
* below: a) use the memory request set in the flow parameter; b) otherwise use the memory
* request set in system properties, or the default held in @memoryRequest.
*
* @param flowParam
* @return Memory request for a flow container
*/
@VisibleForTesting
String getFlowContainerMemoryRequest(final Map<String, String> flowParam) {
if (flowParam != null && !flowParam.isEmpty() && flowParam
.containsKey(FlowParameters.FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST)) {
return flowParam.get(Constants.FlowParameters.FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST);
}
return this.memoryRequest;
}
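// Illustrative example of the precedence above (parameter value hypothetical): a flow that
// sets the memory-request flow parameter to "4Gi" gets a 4Gi request, while flows without
// it fall back to the configured default, e.g. DEFAULT_MEMORY_REQUEST = "2Gi". The CPU
// request resolves the same way against DEFAULT_CPU_REQUEST = "1".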
/**
* This method is used to set up the environment variable that enables remote debugging of the
* Kubernetes flow container. Based on this environment variable, remote debugging can be
* enabled or disabled.
*
* @param envVariables
* @param flowParam
*/
private void setupJavaRemoteDebug(final Map<String, String> envVariables,
final Map<String, String> flowParam) {
if (flowParam != null && !flowParam.isEmpty() && flowParam
.containsKey(Constants.FlowParameters.FLOW_PARAM_JAVA_ENABLE_DEBUG)) {
envVariables.put(ContainerizedDispatchManagerProperties.ENV_JAVA_ENABLE_DEBUG,
flowParam.get(Constants.FlowParameters.FLOW_PARAM_JAVA_ENABLE_DEBUG));
}
}
/**
* This method is used to set up the environment variable that marks the pod as a dev pod, which
* can be helpful for testing. Based on this environment variable, the flow container can be
* started or skipped.
*
* @param envVariables
* @param flowParam
*/
private void setupDevPod(final Map<String, String> envVariables,
final Map<String, String> flowParam) {
if (flowParam != null && !flowParam.isEmpty() && flowParam
.containsKey(FlowParameters.FLOW_PARAM_ENABLE_DEV_POD)) {
envVariables.put(ContainerizedDispatchManagerProperties.ENV_ENABLE_DEV_POD,
flowParam.get(FlowParameters.FLOW_PARAM_ENABLE_DEV_POD));
}
}
/**
* This method is used to set up any environment variables for a pod that are passed as flow
* parameters. To keep the solution generic, it adds all the flow parameters starting with
* @FlowParameters.FLOW_PARAM_POD_ENV_VAR.
*
* @param envVariables
* @param flowParam
*/
void setupPodEnvVariables(final Map<String, String> envVariables,
final Map<String, String> flowParam) {
if (flowParam != null && !flowParam.isEmpty()) {
flowParam.forEach((k, v) -> {
if (k.startsWith(FlowParameters.FLOW_PARAM_POD_ENV_VAR)) {
envVariables
.put(StringUtils.removeStart(k, FlowParameters.FLOW_PARAM_POD_ENV_VAR).toUpperCase(),
v);
}
});
}
}
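// Illustrative example (the actual FLOW_PARAM_POD_ENV_VAR prefix string is assumed):
// a flow parameter "<prefix>java_opts=-Xmx2g" becomes the container environment
// variable JAVA_OPTS=-Xmx2g after the prefix is stripped and the key is upper-cased.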
/**
* Adding environment variables in pod spec builder.
*
* @param v1SpecBuilder
* @param envVariables
*/
private void addEnvVariablesToSpecBuilder(final AzKubernetesV1SpecBuilder v1SpecBuilder,
final Map<String, String> envVariables) {
envVariables.forEach((key, value) -> v1SpecBuilder.addEnvVarToFlowContainer(key, value));
}
/**
* Disable auto-mounting of service account tokens.
*
* @param podSpec pod specification
*/
private void disableSATokenAutomount(V1PodSpec podSpec) {
podSpec.automountServiceAccountToken(false);
}
/**
* @param executionId
* @param podSpec
* @return
*/
@VisibleForTesting
V1Pod createPodFromSpec(final int executionId, final V1PodSpec podSpec, Map<String, String> flowParam) {
final ImmutableMap<String, String> labels = getLabelsForPod(executionId, flowParam);
final ImmutableMap<String, String> annotations = getAnnotationsForPod();
final V1Pod pod = new AzKubernetesV1PodBuilder(getPodName(executionId), this.namespace, podSpec)
.withPodLabels(labels)
.withPodAnnotations(annotations)
.build();
return pod;
}
/**
* This method is used to create a pod.
* 1. Fetch jobTypes for the flow.
* 2. Fetch flow parameters for the version set and for each image type, if set.
* 3. If a valid version set is provided, use the versions from it.
* 4. If a valid version set is not provided, call the ramp-up manager API to get the image
* version for each image type.
* 5. Add all the validation around: a) whether the version set is valid or not; b) if it is
* valid, whether the flow changed and a new jobType was introduced after the version set was
* created - if so, create a new version set using the versions from the existing set plus the
* ramped-up version for the new jobType.
* 6. Create the pod spec using all the version information.
* 7. Insert the version set into the execution_flows table for reference.
* 8. Emit the version set as part of the flow life cycle event.
*
* @param executionId
* @throws ExecutorManagerException
*/
private void createPod(final int executionId) throws ExecutorManagerException {
// Fetch execution flow from execution Id.
final ExecutableFlow flow = this.executorLoader.fetchExecutableFlow(executionId);
// Step 1: Fetch set of jobTypes for a flow from executionId
final TreeSet<String> jobTypes = ContainerImplUtils.getJobTypesForFlow(flow);
logger
.info("ExecId: {}, Jobtypes for flow {} are: {}", executionId, flow.getFlowId(), jobTypes);
logger
.info("ExecId: {}, Dependencies for flow {} are: {}", executionId, flow.getFlowId(),
this.dependencyTypes);
Map<String, String> flowParam = null;
if (flow.getExecutionOptions() != null) {
flowParam = flow.getExecutionOptions().getFlowParameters();
}
if (flowParam != null && !flowParam.isEmpty()) {
logger.info("ExecId: {}, Flow Parameters are: {}", executionId, flowParam);
}
// Create all image types by adding azkaban base image, azkaban config and all job types for
// the flow.
final Set<String> allImageTypes = new TreeSet<>();
allImageTypes.add(this.azkabanBaseImageName);
allImageTypes.add(this.azkabanConfigImageName);
allImageTypes.addAll(jobTypes);
allImageTypes.addAll(this.dependencyTypes);
final VersionSet versionSet = fetchVersionSet(executionId,
flowParam, allImageTypes, flow);
final V1PodSpec podSpec = createPodSpec(executionId, versionSet, jobTypes, this.dependencyTypes, flowParam);
disableSATokenAutomount(podSpec);
// If a pod-template is provided, merge its component definitions into the podSpec.
if (StringUtils.isNotEmpty(this.podTemplatePath)) {
try {
final AzKubernetesV1PodTemplate podTemplate = AzKubernetesV1PodTemplate
.getInstance(this.podTemplatePath);
V1PodSpec podSpecFromTemplate = podTemplate.getPodSpecFromTemplate();
logPodSpecYaml(executionId, podSpecFromTemplate, flowParam, "ExecId: {}, PodSpec template "
+ "before merge: {}");
PodTemplateMergeUtils.mergePodSpec(podSpec, podSpecFromTemplate);
logPodSpecYaml(executionId, podSpecFromTemplate, flowParam, "ExecId: {}, PodSpec after "
+ "template merge: {}");
} catch (final IOException e) {
logger.info("ExecId: {}, Failed to create k8s pod from template: {}", executionId,
e.getMessage());
throw new ExecutorManagerException(e);
}
}
final V1Pod pod = createPodFromSpec(executionId, podSpec, flowParam);
logPodSpecYaml(executionId, pod, flowParam, "ExecId: {}, Pod: {}");
try {
this.coreV1Api.createNamespacedPod(this.namespace, pod, null, null, null);
logger.info("ExecId: {}, Dispatched pod for execution.", executionId);
} catch (final ApiException e) {
logger.error("ExecId: {}, Unable to create Pod: {}", executionId, e.getResponseBody());
throw new ExecutorManagerException(e);
}
// Store version set id in execution_flows for execution_id
this.executorLoader.updateVersionSetId(executionId, versionSet.getVersionSetId());
// Marking flow as PREPARING from DISPATCHING as POD creation request is submitted
flow.setStatus(Status.PREPARING);
flow.setVersionSet(versionSet);
this.executorLoader.updateExecutableFlow(flow);
// Record time taken to dispatch flow to a container
if (flow.getSubmitTime() > 0) {
final long containerDispatchDuration = System.currentTimeMillis() - flow.getSubmitTime();
this.containerizationMetrics.addTimeToDispatch(containerDispatchDuration);
}
// Emit preparing flow event with version set
this.fireEventListeners(Event.create(flow, EventType.FLOW_STATUS_CHANGED, new EventData(flow)));
}
/**
* This method is used to log the pod spec yaml for debugging purposes. If the pod is marked as a
* dev pod, the pod spec yaml is logged at INFO level; otherwise it is logged at DEBUG level.
* @param executionId
* @param podObject Pod/PodSpec depending on the log
* @param flowParam
*/
private static void logPodSpecYaml(final int executionId, final Object podObject,
final Map<String, String> flowParam, String message) {
final String podSpecYaml = Yaml.dump(podObject).trim();
if (flowParam != null && !flowParam.isEmpty() && flowParam
.containsKey(FlowParameters.FLOW_PARAM_ENABLE_DEV_POD)) {
logger.info(message, executionId, podSpecYaml);
} else {
logger.debug(message, executionId, podSpecYaml);
}
}
/**
* TODO: Get azkaban base image version from version set.
*
* @return
*/
private String getAzkabanBaseImageFullPath(final VersionSet versionSet) {
return versionSet.getVersion(this.azkabanBaseImageName).get().pathWithVersion();
}
private String getAzkabanConfigVersion(final VersionSet versionSet) {
return versionSet.getVersion(this.azkabanConfigImageName).get().getVersion();
}
/**
* Create labels that should be applied to the Pod.
*
* @return
*/
private ImmutableMap getLabelsForPod(final int executionId, Map<String, String> flowParam) {
final ImmutableMap.Builder mapBuilder = ImmutableMap.builder();
mapBuilder.put(CLUSTER_LABEL_NAME, this.clusterName);
mapBuilder.put(EXECUTION_ID_LABEL_NAME, EXECUTION_ID_LABEL_PREFIX + executionId);
mapBuilder.put(APP_LABEL_NAME, POD_APPLICATION_TAG);
// Note that the service label must match the selector used for the corresponding service
if (isServiceRequired()) {
mapBuilder.put("service", String.join("-", SERVICE_SELECTOR_PREFIX,
clusterQualifiedExecId(this.clusterName, executionId)));
}
// Set the label for disabling pod-cleanup.
if (flowParam != null && !flowParam.isEmpty() && flowParam
.containsKey(FlowParameters.FLOW_PARAM_DISABLE_POD_CLEANUP)) {
mapBuilder.put(DISABLE_CLEANUP_LABEL_NAME,
flowParam.get(FlowParameters.FLOW_PARAM_DISABLE_POD_CLEANUP));
}
return mapBuilder.build();
}
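// Illustrative label set for execution 12345 with the defaults above (service enabled):
// cluster=azkaban, execution-id=execid-12345, app=azkaban-exec-server, plus
// service=flow-<cluster-qualified exec id>; the exact qualified-id format comes from
// clusterQualifiedExecId and is not reproduced here.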
/**
* Get a {@code labelSelector} that can be used to list all the flow-container pods for the
* current namespace.
* Example Selector: 'cluster=cluster1,app=azkaban-exec-server'
*
* @return label selector
*/
public static String getLabelSelector(final Props azkProps) {
requireNonNull(azkProps, "azkaban properties must not be null");
final String clusterName = azkProps.getString(ConfigurationKeys.AZKABAN_CLUSTER_NAME,
DEFAULT_CLUSTER_NAME);
final StringBuilder selectorBuilder = new StringBuilder();
selectorBuilder.append(CLUSTER_LABEL_NAME + "=" + clusterName).append(",")
.append(APP_LABEL_NAME + "=" + POD_APPLICATION_TAG);
return selectorBuilder.toString();
}
public String getNamespace() {
return this.namespace;
}
/**
* TODO: Add implementation to get annotations for Pod.
*
* @return
*/
private ImmutableMap getAnnotationsForPod() {
return ImmutableMap.of();
}
/**
* TODO: Check if we need to turn everything into lower case?
*
* @param executionId
* @param jobTypes
* @param dependencyTypes
* @param v1SpecBuilder
* @param versionSet
* @throws ExecutorManagerException
*/
private void addInitContainers(final int executionId,
final Set<String> jobTypes, final Set<String> dependencyTypes,
final AzKubernetesV1SpecBuilder v1SpecBuilder,
final VersionSet versionSet)
throws ExecutorManagerException {
for (final String jobType : jobTypes) {
// Skip all the job types that are available in the azkaban base image and create init
// container for the remaining job types.
if (isPresentInIncludedJobTypes(jobType)) {
continue;
}
try {
final String imageFullPath = versionSet.getVersion(jobType).get().pathWithVersion();
v1SpecBuilder.addInitContainerType(jobType, imageFullPath, ImagePullPolicy.IF_NOT_PRESENT,
String.join("/", this.initMountPathPrefixForJobtypes, jobType),
String.join("/", this.appMountPathPrefixForJobtypes, jobType), InitContainerType.JOBTYPE);
} catch (final Exception e) {
throw new ExecutorManagerException("Did not find the version string for image type: " +
jobType + " in versionSet");
}
}
for (final String dependency: dependencyTypes) {
try {
final String imageFullPath = versionSet.getVersion(dependency).get().pathWithVersion();
v1SpecBuilder
.addInitContainerType(dependency, imageFullPath, ImagePullPolicy.IF_NOT_PRESENT,
String.join("/", this.initMountPathPrefixForDependencies, dependency),
String.join("/", this.appMountPathPrefixForDependencies, dependency),
InitContainerType.DEPENDENCY);
} catch (final Exception e) {
throw new ExecutorManagerException("Did not find the version string for image type: " +
dependency + " in versionSet");
}
}
}
private void addSecretVolume(final AzKubernetesV1SpecBuilder v1SpecBuilder) {
v1SpecBuilder.addSecretVolume(this.secretVolume, this.secretName, this.secretMountpath);
}
/**
* This method is used to create service for flow container for execution id.
*
* @param executionId
* @throws ExecutorManagerException
*/
private void createService(final int executionId) throws ExecutorManagerException {
try {
final AzKubernetesV1ServiceBuilder azKubernetesV1ServiceBuilder =
new AzKubernetesV1ServiceBuilder(
"v1Service.yaml");
final V1Service serviceObject = azKubernetesV1ServiceBuilder
.withExecId(clusterQualifiedExecId(this.clusterName, executionId))
.withServiceName(getServiceName(executionId))
.withNamespace(this.namespace)
.withApiVersion(SERVICE_API_VERSION_2)
.withKind(MAPPING)
.withPort(String.valueOf(this.servicePort))
.withTimeoutMs(String.valueOf(this.serviceTimeout))
.build();
this.coreV1Api.createNamespacedService(this.namespace, serviceObject, null, null, null);
logger.info("ExecId: {}, Service is created.", executionId);
} catch (final IOException e) {
logger.error("ExecId: {}, Unable to create service in Kubernetes. Msg: {}", executionId,
e.getMessage());
throw new ExecutorManagerException(e);
} catch (final ApiException e) {
logger.error("ExecId: {}, Unable to create service in Kubernetes. Msg: {} ",
executionId, e.getResponseBody());
throw new ExecutorManagerException(e);
}
}
/**
* This method is used to check whether a service should be created in Kubernetes for the flow
* container pod.
*
* @return
*/
private boolean isServiceRequired() {
return this.azkProps
.getBoolean(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_REQUIRED, false);
}
/**
* This method is used to delete the pod in Kubernetes. It will terminate the pod.
*
* @param executionId
* @throws ExecutorManagerException
*/
private void deletePod(final int executionId) throws ExecutorManagerException {
try {
final String podName = getPodName(executionId);
this.coreV1Api.deleteNamespacedPod(podName, this.namespace, null, null,
null, null, null, new V1DeleteOptions());
logger.info("ExecId: {}, Action: Pod Deletion, Pod Name: {}", executionId, podName);
} catch (final ApiException e) {
logger.error("ExecId: {}, Unable to delete Pod in Kubernetes: {}", executionId,
e.getResponseBody());
throw new ExecutorManagerException(e);
}
}
/**
* This method is used to delete service in Kubernetes which is created for Pod.
*
* @param executionId
* @throws ExecutorManagerException
*/
public void deleteService(final int executionId) throws ExecutorManagerException {
final String serviceName = getServiceName(executionId);
try {
final V1Status deleteResult = this.coreV1Api.deleteNamespacedService(
serviceName,
this.namespace,
null,
null,
null,
null,
null,
new V1DeleteOptions());
logger.info("ExecId: {}, Action: Service Deletion, Service Name: {}, code: {}, message: {}",
executionId,
serviceName,
deleteResult.getCode(),
deleteResult.getMessage());
} catch (final ApiException e) {
logger.error("ExecId: {}, Unable to delete service in Kubernetes: {}", executionId,
e.getResponseBody());
throw new ExecutorManagerException(e);
}
}
/**
* This method is used to get the service name. It is built from the service name prefix, the
* azkaban cluster name and the execution id.
*
* @param executionId
* @return
*/
private String getServiceName(final int executionId) {
return String.join("-", this.servicePrefix, this.clusterName, String.valueOf(executionId));
}
/**
* This method is used to get the name of the pod based on the naming convention. It is built
* from the pod name prefix, the azkaban cluster name and the execution id.
*
* @param executionId
* @return
*/
private String getPodName(final int executionId) {
return String.join("-", this.podPrefix, this.clusterName, String.valueOf(executionId));
}
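// Example with the defaults above: getPodName(12345) returns "fc-dep-azkaban-12345"
// and getServiceName(12345) returns "fc-svc-azkaban-12345".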
}
| 1 | 22,662 | Default max cpu should be 8 and memory 64GB | azkaban-azkaban | java |
@@ -156,6 +156,11 @@ class _MissingPandasLikeSeries(object):
real = unsupported_property(
'real',
reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead.")
+ nbytes = unsupported_property(
+ 'nbytes',
+ reason="'nbytes' requires computing the whole dataset. You can calculate it manually "
+ "from its 'itemsize' by explicitly executing a count. Use Spark's web UI "
+ "to monitor disk and memory usage of your application in general.")
# Functions we won't support.
memory_usage = common.memory_usage(unsupported_function) | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from databricks.koalas.missing import _unsupported_function, _unsupported_property, common
def unsupported_function(method_name, deprecated=False, reason=""):
return _unsupported_function(class_name='pd.Series', method_name=method_name,
deprecated=deprecated, reason=reason)
def unsupported_property(property_name, deprecated=False, reason=""):
return _unsupported_property(class_name='pd.Series', property_name=property_name,
deprecated=deprecated, reason=reason)
class _MissingPandasLikeSeries(object):
# Properties
axes = unsupported_property('axes')
iat = unsupported_property('iat')
nbytes = unsupported_property('nbytes')
# Deprecated properties
blocks = unsupported_property('blocks', deprecated=True)
ftypes = unsupported_property('ftypes', deprecated=True)
ftype = unsupported_property('ftype', deprecated=True)
is_copy = unsupported_property('is_copy', deprecated=True)
ix = unsupported_property('ix', deprecated=True)
asobject = unsupported_property('asobject', deprecated=True)
strides = unsupported_property('strides', deprecated=True)
imag = unsupported_property('imag', deprecated=True)
itemsize = unsupported_property('itemsize', deprecated=True)
data = unsupported_property('data', deprecated=True)
base = unsupported_property('base', deprecated=True)
flags = unsupported_property('flags', deprecated=True)
# Functions
agg = unsupported_function('agg')
aggregate = unsupported_function('aggregate')
align = unsupported_function('align')
argsort = unsupported_function('argsort')
asfreq = unsupported_function('asfreq')
asof = unsupported_function('asof')
at_time = unsupported_function('at_time')
autocorr = unsupported_function('autocorr')
between = unsupported_function('between')
between_time = unsupported_function('between_time')
bfill = unsupported_function('bfill')
combine = unsupported_function('combine')
combine_first = unsupported_function('combine_first')
copy = unsupported_function('copy')
cov = unsupported_function('cov')
divmod = unsupported_function('divmod')
dot = unsupported_function('dot')
drop = unsupported_function('drop')
drop_duplicates = unsupported_function('drop_duplicates')
droplevel = unsupported_function('droplevel')
duplicated = unsupported_function('duplicated')
ewm = unsupported_function('ewm')
expanding = unsupported_function('expanding')
factorize = unsupported_function('factorize')
ffill = unsupported_function('ffill')
filter = unsupported_function('filter')
first = unsupported_function('first')
first_valid_index = unsupported_function('first_valid_index')
get = unsupported_function('get')
infer_objects = unsupported_function('infer_objects')
interpolate = unsupported_function('interpolate')
items = unsupported_function('items')
iteritems = unsupported_function('iteritems')
keys = unsupported_function('keys')
last = unsupported_function('last')
last_valid_index = unsupported_function('last_valid_index')
mad = unsupported_function('mad')
mask = unsupported_function('mask')
mode = unsupported_function('mode')
pct_change = unsupported_function('pct_change')
pop = unsupported_function('pop')
prod = unsupported_function('prod')
product = unsupported_function('product')
ravel = unsupported_function('ravel')
rdivmod = unsupported_function('rdivmod')
reindex = unsupported_function('reindex')
reindex_like = unsupported_function('reindex_like')
rename_axis = unsupported_function('rename_axis')
reorder_levels = unsupported_function('reorder_levels')
repeat = unsupported_function('repeat')
replace = unsupported_function('replace')
resample = unsupported_function('resample')
rolling = unsupported_function('rolling')
searchsorted = unsupported_function('searchsorted')
sem = unsupported_function('sem')
set_axis = unsupported_function('set_axis')
slice_shift = unsupported_function('slice_shift')
squeeze = unsupported_function('squeeze')
swapaxes = unsupported_function('swapaxes')
swaplevel = unsupported_function('swaplevel')
tail = unsupported_function('tail')
take = unsupported_function('take')
to_hdf = unsupported_function('to_hdf')
to_period = unsupported_function('to_period')
to_sql = unsupported_function('to_sql')
to_timestamp = unsupported_function('to_timestamp')
truncate = unsupported_function('truncate')
tshift = unsupported_function('tshift')
tz_convert = unsupported_function('tz_convert')
tz_localize = unsupported_function('tz_localize')
unstack = unsupported_function('unstack')
update = unsupported_function('update')
view = unsupported_function('view')
where = unsupported_function('where')
xs = unsupported_function('xs')
# Deprecated functions
as_blocks = unsupported_function('as_blocks', deprecated=True)
as_matrix = unsupported_function('as_matrix', deprecated=True)
clip_lower = unsupported_function('clip_lower', deprecated=True)
clip_upper = unsupported_function('clip_upper', deprecated=True)
compress = unsupported_function('compress', deprecated=True)
convert_objects = unsupported_function('convert_objects', deprecated=True)
get_ftype_counts = unsupported_function('get_ftype_counts', deprecated=True)
get_value = unsupported_function('get_value', deprecated=True)
nonzero = unsupported_function('nonzero', deprecated=True)
reindex_axis = unsupported_function('reindex_axis', deprecated=True)
select = unsupported_function('select', deprecated=True)
set_value = unsupported_function('set_value', deprecated=True)
valid = unsupported_function('valid', deprecated=True)
get_values = unsupported_function('get_values', deprecated=True)
to_dense = unsupported_function('to_dense', deprecated=True)
to_sparse = unsupported_function('to_sparse', deprecated=True)
to_msgpack = unsupported_function('to_msgpack', deprecated=True)
compound = unsupported_function('compound', deprecated=True)
put = unsupported_function('put', deprecated=True)
item = unsupported_function('item', deprecated=True)
ptp = unsupported_function('ptp', deprecated=True)
argmax = unsupported_function('argmax', deprecated=True)
argmin = unsupported_function('argmin', deprecated=True)
# Properties we won't support.
values = common.values(unsupported_property)
array = common.array(unsupported_property)
real = unsupported_property(
'real',
reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead.")
# Functions we won't support.
memory_usage = common.memory_usage(unsupported_function)
to_pickle = common.to_pickle(unsupported_function)
to_xarray = common.to_xarray(unsupported_function)
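
The reason string added in the patch above hints at a manual workaround. A minimal sketch of that computation, assuming a Koalas Series s with a fixed-size, NumPy-convertible dtype:

import numpy as np

# Manual nbytes estimate: count() deliberately triggers a full Spark job,
# which is exactly why the property is not supported out of the box.
nbytes = s.count() * np.dtype(s.dtype).itemsize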
| 1 | 11,367 | @itholic, can you remove `nbytes = unsupported_property('nbytes')` at `_MissingPandasLikeSeries`? | databricks-koalas | py |
@@ -101,9 +101,8 @@ func TestEnvironmentConfig(t *testing.T) {
assert.True(t, conf.TaskIAMRoleEnabled, "Wrong value for TaskIAMRoleEnabled")
assert.True(t, conf.TaskIAMRoleEnabledForNetworkHost, "Wrong value for TaskIAMRoleEnabledForNetworkHost")
assert.True(t, conf.ImageCleanupDisabled, "Wrong value for ImageCleanupDisabled")
- assert.True(t, conf.TaskCPUMemLimit, "Wrong value for TaskCPUMemLimit")
- assert.True(t, conf.TaskENIEnabled, "Wrong value for TaskNetwork")
+ assert.True(t, conf.TaskENIEnabled, "Wrong value for TaskNetwork")
assert.Equal(t, (30 * time.Minute), conf.MinimumImageDeletionAge)
assert.Equal(t, (2 * time.Hour), conf.ImageCleanupInterval)
assert.Equal(t, 2, conf.NumImagesToDeletePerCycle) | 1 | // Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package config
import (
"errors"
"os"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ec2/mocks"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
func TestMerge(t *testing.T) {
conf1 := &Config{Cluster: "Foo"}
conf2 := Config{Cluster: "ignored", APIEndpoint: "Bar"}
conf3 := Config{AWSRegion: "us-west-2"}
conf1.Merge(conf2).Merge(conf3)
assert.Equal(t, conf1.Cluster, "Foo", "The cluster should not have been overridden")
assert.Equal(t, conf1.APIEndpoint, "Bar", "The APIEndpoint should have been merged in")
assert.Equal(t, conf1.AWSRegion, "us-west-2", "Incorrect region")
}
func TestBrokenEC2Metadata(t *testing.T) {
ctrl := gomock.NewController(t)
mockEc2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl)
mockEc2Metadata.EXPECT().InstanceIdentityDocument().Return(ec2metadata.EC2InstanceIdentityDocument{}, errors.New("err"))
_, err := NewConfig(mockEc2Metadata)
assert.Error(t, err, "Expected error when region isn't set and metadata doesn't work")
}
func TestBrokenEC2MetadataEndpoint(t *testing.T) {
defer setTestRegion()()
ctrl := gomock.NewController(t)
mockEc2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl)
mockEc2Metadata.EXPECT().InstanceIdentityDocument().Return(ec2metadata.EC2InstanceIdentityDocument{}, errors.New("err"))
config, err := NewConfig(mockEc2Metadata)
assert.NoError(t, err)
assert.Equal(t, config.AWSRegion, "us-west-2", "Wrong region")
assert.Zero(t, config.APIEndpoint, "Endpoint env variable not set; endpoint should be blank")
}
func TestEnvironmentConfig(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_CLUSTER", "myCluster")()
defer setTestEnv("ECS_RESERVED_PORTS_UDP", "[42,99]")()
defer setTestEnv("ECS_RESERVED_MEMORY", "20")()
defer setTestEnv("ECS_CONTAINER_STOP_TIMEOUT", "60s")()
defer setTestEnv("ECS_AVAILABLE_LOGGING_DRIVERS", "[\""+string(dockerclient.SyslogDriver)+"\"]")()
defer setTestEnv("ECS_SELINUX_CAPABLE", "true")()
defer setTestEnv("ECS_APPARMOR_CAPABLE", "true")()
defer setTestEnv("ECS_DISABLE_PRIVILEGED", "true")()
defer setTestEnv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "90s")()
defer setTestEnv("ECS_ENABLE_TASK_IAM_ROLE", "true")()
defer setTestEnv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST", "true")()
defer setTestEnv("ECS_DISABLE_IMAGE_CLEANUP", "true")()
defer setTestEnv("ECS_IMAGE_CLEANUP_INTERVAL", "2h")()
defer setTestEnv("ECS_IMAGE_MINIMUM_CLEANUP_AGE", "30m")()
defer setTestEnv("ECS_NUM_IMAGES_DELETE_PER_CYCLE", "2")()
defer setTestEnv("ECS_INSTANCE_ATTRIBUTES", "{\"my_attribute\": \"testing\"}")()
defer setTestEnv("ECS_ENABLE_TASK_ENI", "true")()
conf, err := environmentConfig()
assert.Nil(t, err)
assert.Equal(t, "myCluster", conf.Cluster)
assert.Equal(t, 2, len(conf.ReservedPortsUDP))
assert.Contains(t, conf.ReservedPortsUDP, uint16(42))
assert.Contains(t, conf.ReservedPortsUDP, uint16(99))
assert.Equal(t, uint16(20), conf.ReservedMemory)
expectedDuration, _ := time.ParseDuration("60s")
assert.Equal(t, expectedDuration, conf.DockerStopTimeout)
assert.Equal(t, []dockerclient.LoggingDriver{dockerclient.SyslogDriver}, conf.AvailableLoggingDrivers)
assert.True(t, conf.PrivilegedDisabled)
assert.True(t, conf.SELinuxCapable, "Wrong value for SELinuxCapable")
assert.True(t, conf.AppArmorCapable, "Wrong value for AppArmorCapable")
assert.True(t, conf.TaskIAMRoleEnabled, "Wrong value for TaskIAMRoleEnabled")
assert.True(t, conf.TaskIAMRoleEnabledForNetworkHost, "Wrong value for TaskIAMRoleEnabledForNetworkHost")
assert.True(t, conf.ImageCleanupDisabled, "Wrong value for ImageCleanupDisabled")
assert.True(t, conf.TaskCPUMemLimit, "Wrong value for TaskCPUMemLimit")
assert.True(t, conf.TaskENIEnabled, "Wrong value for TaskNetwork")
assert.Equal(t, (30 * time.Minute), conf.MinimumImageDeletionAge)
assert.Equal(t, (2 * time.Hour), conf.ImageCleanupInterval)
assert.Equal(t, 2, conf.NumImagesToDeletePerCycle)
assert.Equal(t, "testing", conf.InstanceAttributes["my_attribute"])
assert.Equal(t, (90 * time.Second), conf.TaskCleanupWaitDuration)
}
func TestTrimWhitespaceWhenCreating(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_CLUSTER", "default \r")()
defer setTestEnv("ECS_ENGINE_AUTH_TYPE", "dockercfg\r")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.Equal(t, cfg.Cluster, "default", "Wrong cluster")
assert.Equal(t, cfg.EngineAuthType, "dockercfg", "Wrong auth type")
}
func TestTrimWhitespace(t *testing.T) {
cfg := &Config{
Cluster: " asdf ",
AWSRegion: " us-east-1\r\t",
DataDir: "/trailing/space/directory ",
}
cfg.trimWhitespace()
assert.Equal(t, cfg, &Config{Cluster: "asdf", AWSRegion: "us-east-1", DataDir: "/trailing/space/directory "})
}
func TestConfigBoolean(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_DISABLE_METRICS", "true")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.True(t, cfg.DisableMetrics)
}
func TestBadLoggingDriverSerialization(t *testing.T) {
defer setTestEnv("ECS_AVAILABLE_LOGGING_DRIVERS", "[\"malformed]")
defer setTestRegion()()
conf, err := environmentConfig()
assert.NoError(t, err)
assert.Zero(t, len(conf.AvailableLoggingDrivers), "Wrong value for AvailableLoggingDrivers")
}
func TestBadAttributesSerialization(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_INSTANCE_ATTRIBUTES", "This is not valid JSON")()
_, err := environmentConfig()
assert.Error(t, err)
}
func TestInvalidLoggingDriver(t *testing.T) {
conf := DefaultConfig()
conf.AWSRegion = "us-west-2"
conf.AvailableLoggingDrivers = []dockerclient.LoggingDriver{"invalid-logging-driver"}
assert.Error(t, conf.validateAndOverrideBounds(), "Should be error with invalid-logging-driver")
}
func TestInvalidFormatDockerStopTimeout(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_CONTAINER_STOP_TIMEOUT", "invalid")()
conf, err := environmentConfig()
assert.NoError(t, err)
assert.Zero(t, conf.DockerStopTimeout, "Wrong value for DockerStopTimeout")
}
func TestInvalidValueDockerStopTimeout(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_CONTAINER_STOP_TIMEOUT", "-10s")()
conf, err := environmentConfig()
assert.NoError(t, err)
assert.Zero(t, conf.DockerStopTimeout)
}
func TestInvalidDockerStopTimeout(t *testing.T) {
conf := DefaultConfig()
conf.DockerStopTimeout = -1 * time.Second
assert.Error(t, conf.validateAndOverrideBounds(), "Should be error with negative DockerStopTimeout")
}
func TestInvalidFormatParseEnvVariableUint16(t *testing.T) {
defer setTestRegion()()
setTestEnv("FOO", "foo")
var16 := parseEnvVariableUint16("FOO")
assert.Zero(t, var16, "Expected 0 from parseEnvVariableUint16 for invalid Uint16 format")
}
func TestValidFormatParseEnvVariableUint16(t *testing.T) {
defer setTestRegion()()
setTestEnv("FOO", "1")
var16 := parseEnvVariableUint16("FOO")
assert.Equal(t, var16, uint16(1), "Unexpected value parsed in parseEnvVariableUint16.")
}
func TestInvalidFormatParseEnvVariableDuration(t *testing.T) {
defer setTestRegion()()
setTestEnv("FOO", "foo")
duration := parseEnvVariableDuration("FOO")
assert.Zero(t, duration, "Expected 0 from parseEnvVariableDuration for invalid format")
}
func TestValidFormatParseEnvVariableDuration(t *testing.T) {
defer setTestRegion()()
setTestEnv("FOO", "1s")
duration := parseEnvVariableDuration("FOO")
assert.Equal(t, duration, 1*time.Second, "Unexpected value parsed in parseEnvVariableDuration.")
}
func TestInvalidTaskCleanupTimeoutOverridesToThreeHours(t *testing.T) {
defer setTestRegion()()
setTestEnv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "1s")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
// If an invalid value is set, the config should pick up the default value for
// cleaning up the task.
	assert.Equal(t, cfg.TaskCleanupWaitDuration, 3*time.Hour, "Default task cleanup wait duration set incorrectly")
}
func TestTaskCleanupTimeout(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "10m")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.Equal(t, cfg.TaskCleanupWaitDuration, 10*time.Minute, "Task cleanup wait duration set incorrectly")
}
func TestInvalidReservedMemoryOverridesToZero(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_RESERVED_MEMORY", "-1")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
// If an invalid value is set, the config should pick up the default value for
// reserved memory, which is 0.
assert.Zero(t, cfg.ReservedMemory, "Wrong value for ReservedMemory")
}
func TestReservedMemory(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_RESERVED_MEMORY", "1")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.Equal(t, cfg.ReservedMemory, uint16(1), "Wrong value for ReservedMemory.")
}
func TestTaskIAMRoleEnabled(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_ENABLE_TASK_IAM_ROLE", "true")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.True(t, cfg.TaskIAMRoleEnabled, "Wrong value for TaskIAMRoleEnabled")
}
func TestTaskIAMRoleForHostNetworkEnabled(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST", "true")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.True(t, cfg.TaskIAMRoleEnabledForNetworkHost, "Wrong value for TaskIAMRoleEnabledForNetworkHost")
}
func TestCredentialsAuditLogFile(t *testing.T) {
defer setTestRegion()()
dummyLocation := "/foo/bar.log"
defer setTestEnv("ECS_AUDIT_LOGFILE", dummyLocation)()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.Equal(t, cfg.CredentialsAuditLogFile, dummyLocation, "Wrong value for CredentialsAuditLogFile")
}
func TestCredentialsAuditLogDisabled(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_AUDIT_LOGFILE_DISABLED", "true")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.True(t, cfg.CredentialsAuditLogDisabled, "Wrong value for CredentialsAuditLogDisabled")
}
func TestImageCleanupMinimumInterval(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_IMAGE_CLEANUP_INTERVAL", "1m")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.Equal(t, cfg.ImageCleanupInterval, DefaultImageCleanupTimeInterval, "Wrong value for ImageCleanupInterval")
}
func TestImageCleanupMinimumNumImagesToDeletePerCycle(t *testing.T) {
defer setTestRegion()()
defer setTestEnv("ECS_NUM_IMAGES_DELETE_PER_CYCLE", "-1")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.Equal(t, cfg.NumImagesToDeletePerCycle, DefaultNumImagesToDeletePerCycle, "Wrong value for NumImagesToDeletePerCycle")
}
func TestTaskResourceLimitsOverride(t *testing.T) {
t.Skip()
defer setTestRegion()()
defer setTestEnv("ECS_ENABLE_TASK_CPU_MEM_LIMIT", "false")()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.False(t, cfg.TaskCPUMemLimit, "Task resource limits should be overridden to false")
}
func TestAWSVPCBlockInstanceMetadata(t *testing.T) {
defer setTestEnv("ECS_AWSVPC_BLOCK_IMDS", "true")()
defer setTestRegion()()
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.True(t, cfg.AWSVPCBlockInstanceMetdata)
}
func setTestRegion() func() {
return setTestEnv("AWS_DEFAULT_REGION", "us-west-2")
}
func setTestEnv(k, v string) func() {
os.Setenv(k, v)
return func() {
os.Unsetenv(k)
}
}
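
On the review question attached to this record: if the TaskCPUMemLimit assertion is restored in TestEnvironmentConfig, the matching env var presumably has to be set alongside the others. A minimal sketch, assuming environmentConfig() still honors ECS_ENABLE_TASK_CPU_MEM_LIMIT:

	defer setTestEnv("ECS_ENABLE_TASK_CPU_MEM_LIMIT", "true")()
	// ...after conf, err := environmentConfig():
	assert.True(t, conf.TaskCPUMemLimit, "Wrong value for TaskCPUMemLimit")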
| 1 | 17,190 | What happened to the assertion on `conf.TaskCPUMemLimit`? | aws-amazon-ecs-agent | go |
@@ -5,10 +5,13 @@ import { render } from './render';
import { rerender } from './render-queue';
import options from './options';
+const createRef = Object;
+
export default {
h,
createElement,
cloneElement,
+ createRef,
Component,
render,
rerender, | 1 | import { h, h as createElement } from './h';
import { cloneElement } from './clone-element';
import { Component } from './component';
import { render } from './render';
import { rerender } from './render-queue';
import options from './options';
export default {
h,
createElement,
cloneElement,
Component,
render,
rerender,
options
};
export {
h,
createElement,
cloneElement,
Component,
render,
rerender,
options
};
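
On the review question attached to this record: const createRef = Object works because calling Object() with no arguments returns a fresh empty object, so each call behaves like an object literal. A minimal sketch of the equivalence; treating refs as { current } holders is an assumption about how consumers use them:

const ref = createRef(); // same shape as ({})
ref.current = null;      // consumers typically read and write .current

// A literal-based alternative would be:
const createRefLiteral = () => ({});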
| 1 | 12,073 | Just curious: Is using `Object` faster than a literal `{}`? | preactjs-preact | js |
@@ -59,6 +59,14 @@ func ValidateCertificateForACMEIssuer(crt *v1alpha1.CertificateSpec, issuer *v1a
el = append(el, field.Invalid(specPath.Child("organization"), crt.Organization, "ACME does not support setting the organization name"))
}
+ if crt.Duration.Duration != 0 {
+ el = append(el, field.Invalid(specPath.Child("duration"), crt.Duration, "ACME does not support certificate durations"))
+ }
+
+ if crt.RenewBefore.Duration != 0 {
+ el = append(el, field.Invalid(specPath.Child("renewBefore"), crt.RenewBefore, "ACME does not support certificate renewal times"))
+ }
+
return el
}
| 1 | /*
Copyright 2018 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
"github.com/jetstack/cert-manager/pkg/controller"
)
func ValidateCertificateForIssuer(crt *v1alpha1.Certificate, issuerObj v1alpha1.GenericIssuer) field.ErrorList {
el := field.ErrorList{}
path := field.NewPath("spec")
issuerType, err := controller.NameForIssuer(issuerObj)
if err != nil {
el = append(el, field.Invalid(path, err.Error(), err.Error()))
return el
}
switch issuerType {
case controller.IssuerACME:
el = append(el, ValidateCertificateForACMEIssuer(&crt.Spec, issuerObj.GetSpec(), path)...)
case controller.IssuerCA:
el = append(el, ValidateCertificateForCAIssuer(&crt.Spec, issuerObj.GetSpec(), path)...)
case controller.IssuerVault:
el = append(el, ValidateCertificateForVaultIssuer(&crt.Spec, issuerObj.GetSpec(), path)...)
case controller.IssuerSelfSigned:
el = append(el, ValidateCertificateForSelfSignedIssuer(&crt.Spec, issuerObj.GetSpec(), path)...)
}
return el
}
func ValidateCertificateForACMEIssuer(crt *v1alpha1.CertificateSpec, issuer *v1alpha1.IssuerSpec, specPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if crt.IsCA {
el = append(el, field.Invalid(specPath.Child("isCA"), crt.KeyAlgorithm, "ACME does not support CA certificates"))
}
if len(crt.Organization) != 0 {
el = append(el, field.Invalid(specPath.Child("organization"), crt.Organization, "ACME does not support setting the organization name"))
}
return el
}
func ValidateCertificateForCAIssuer(crt *v1alpha1.CertificateSpec, issuer *v1alpha1.IssuerSpec, specPath *field.Path) field.ErrorList {
el := field.ErrorList{}
return el
}
func ValidateCertificateForVaultIssuer(crt *v1alpha1.CertificateSpec, issuer *v1alpha1.IssuerSpec, specPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if crt.IsCA {
el = append(el, field.Invalid(specPath.Child("isCA"), crt.KeyAlgorithm, "Vault issuer does not currently support CA certificates"))
}
if len(crt.Organization) != 0 {
el = append(el, field.Invalid(specPath.Child("organization"), crt.Organization, "Vault issuer does not currently support setting the organization name"))
}
return el
}
func ValidateCertificateForSelfSignedIssuer(crt *v1alpha1.CertificateSpec, issuer *v1alpha1.IssuerSpec, specPath *field.Path) field.ErrorList {
el := field.ErrorList{}
return el
}
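
To make the new duration checks in the patch concrete, a minimal sketch of a spec they would reject, assuming Duration and RenewBefore are metav1.Duration values (k8s.io/apimachinery/pkg/apis/meta/v1) as the patch's field accesses imply:

	crt := &v1alpha1.CertificateSpec{
		Duration:    metav1.Duration{Duration: 90 * 24 * time.Hour},
		RenewBefore: metav1.Duration{Duration: 15 * 24 * time.Hour},
	}
	el := ValidateCertificateForACMEIssuer(crt, issuerObj.GetSpec(), field.NewPath("spec"))
	// el now holds two field.Invalid errors, for spec.duration and spec.renewBefore.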
| 1 | 13,814 | How come we don't allow this to be configured with the ACME issuer? Happy to leave this as-is for now if there's a lot more consideration that needs to be made, but it seems like we could/should be able to allow this? | jetstack-cert-manager | go |
@@ -208,7 +208,6 @@ type HTTPHealthCheckOpts struct {
// NetworkLoadBalancerListener holds configuration that's need for a Network Load Balancer listener.
type NetworkLoadBalancerListener struct {
- Port string
Protocol string
TargetContainer string
TargetPort string | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"bytes"
"fmt"
"text/template"
"github.com/dustin/go-humanize/english"
"github.com/google/uuid"
"github.com/aws/aws-sdk-go/aws"
)
// Constants for template paths.
const (
// Paths of workload cloudformation templates under templates/workloads/.
fmtWkldCFTemplatePath = "workloads/%s/%s/cf.yml"
fmtWkldPartialsCFTemplatePath = "workloads/partials/cf/%s.yml"
// Directories under templates/workloads/.
servicesDirName = "services"
jobDirName = "jobs"
// Names of workload templates.
lbWebSvcTplName = "lb-web"
rdWebSvcTplName = "rd-web"
backendSvcTplName = "backend"
workerSvcTplName = "worker"
scheduledJobTplName = "scheduled-job"
)
// Constants for workload options.
const (
// AWS VPC networking configuration.
EnablePublicIP = "ENABLED"
DisablePublicIP = "DISABLED"
PublicSubnetsPlacement = "PublicSubnets"
PrivateSubnetsPlacement = "PrivateSubnets"
// RuntimePlatform configuration.
OSLinux = "LINUX"
OSWindowsServerFull = "WINDOWS_SERVER_2019_FULL"
OSWindowsServerCore = "WINDOWS_SERVER_2019_CORE"
ArchX86 = "X86_64"
ArchARM = "ARM"
ArchARM64 = "ARM64"
)
// Constants for ARN options.
const (
snsARNPattern = "arn:%s:sns:%s:%s:%s-%s-%s-%s"
)
var (
// Template names under "workloads/partials/cf/".
partialsWorkloadCFTemplateNames = []string{
"loggroup",
"envvars-container",
"envvars-common",
"secrets",
"executionrole",
"taskrole",
"workload-container",
"fargate-taskdef-base-properties",
"service-base-properties",
"servicediscovery",
"addons",
"sidecars",
"logconfig",
"autoscaling",
"eventrule",
"state-machine",
"state-machine-definition.json",
"efs-access-point",
"env-controller",
"mount-points",
"volumes",
"image-overrides",
"instancerole",
"accessrole",
"publish",
"subscribe",
"nlb",
"vpc-connector",
}
// Operating systems to determine Fargate platform versions.
osFamiliesForPV100 = []string{
OSWindowsServerFull, OSWindowsServerCore,
}
)
// WorkloadNestedStackOpts holds configuration that's needed if the workload stack has a nested stack.
type WorkloadNestedStackOpts struct {
StackName string
VariableOutputs []string
SecretOutputs []string
PolicyOutputs []string
SecurityGroupOutputs []string
}
// SidecarOpts holds configuration that's needed if the service has sidecar containers.
type SidecarOpts struct {
Name *string
Image *string
Essential *bool
Port *string
Protocol *string
CredsParam *string
Variables map[string]string
Secrets map[string]string
Storage SidecarStorageOpts
DockerLabels map[string]string
DependsOn map[string]string
EntryPoint []string
Command []string
HealthCheck *ContainerHealthCheck
}
// SidecarStorageOpts holds data structures for rendering Mount Points inside of a sidecar.
type SidecarStorageOpts struct {
MountPoints []*MountPoint
}
// StorageOpts holds data structures for rendering Volumes and Mount Points
type StorageOpts struct {
Ephemeral *int
Volumes []*Volume
MountPoints []*MountPoint
EFSPerms []*EFSPermission
ManagedVolumeInfo *ManagedVolumeCreationInfo // Used for delegating CreationInfo for Copilot-managed EFS.
}
// requiresEFSCreation returns true if managed volume information is specified; false otherwise.
func (s *StorageOpts) requiresEFSCreation() bool {
return s.ManagedVolumeInfo != nil
}
// EFSPermission holds information needed to render an IAM policy statement.
type EFSPermission struct {
FilesystemID *string
Write bool
AccessPointID *string
}
// MountPoint holds information needed to render a MountPoint in a containerdefinition.
type MountPoint struct {
ContainerPath *string
ReadOnly *bool
SourceVolume *string
}
// Volume contains fields that render a volume, its name, and EFSVolumeConfiguration
type Volume struct {
Name *string
EFS *EFSVolumeConfiguration
}
// ManagedVolumeCreationInfo holds information about how to create Copilot-managed access points.
type ManagedVolumeCreationInfo struct {
Name *string
DirName *string
UID *uint32
GID *uint32
}
// EFSVolumeConfiguration contains information about how to specify externally managed file systems.
type EFSVolumeConfiguration struct {
// EFSVolumeConfiguration
Filesystem *string
RootDirectory *string // "/" or empty are equivalent
// Authorization Config
AccessPointID *string
IAM *string // ENABLED or DISABLED
}
// LogConfigOpts holds configuration that's needed if the service is configured with Firelens to route
// its logs.
type LogConfigOpts struct {
Image *string
Destination map[string]string
EnableMetadata *string
SecretOptions map[string]string
ConfigFile *string
Variables map[string]string
Secrets map[string]string
}
// HTTPHealthCheckOpts holds configuration that's needed for HTTP Health Check.
type HTTPHealthCheckOpts struct {
HealthCheckPath string
SuccessCodes string
HealthyThreshold *int64
UnhealthyThreshold *int64
Interval *int64
Timeout *int64
DeregistrationDelay *int64
GracePeriod *int64
}
// NetworkLoadBalancerListener holds configuration that's needed for a Network Load Balancer listener.
type NetworkLoadBalancerListener struct {
Port string
Protocol string
TargetContainer string
TargetPort string
SSLPolicy *string
Aliases []string
	Stickiness          *bool
}
// NetworkLoadBalancer holds configuration that's needed for a Network Load Balancer.
type NetworkLoadBalancer struct {
PublicSubnetCIDRs []string
Listener NetworkLoadBalancerListener
MainContainerPort string
}
// AdvancedCount holds configuration for autoscaling and capacity provider
// parameters.
type AdvancedCount struct {
Spot *int
Autoscaling *AutoscalingOpts
Cps []*CapacityProviderStrategy
}
// ContainerHealthCheck holds configuration for container health check.
type ContainerHealthCheck struct {
Command []string
Interval *int64
Retries *int64
StartPeriod *int64
Timeout *int64
}
// CapacityProviderStrategy holds the configuration needed for a
// CapacityProviderStrategyItem on a Service
type CapacityProviderStrategy struct {
Base *int
Weight *int
CapacityProvider string
}
// AutoscalingOpts holds configuration that's needed for Auto Scaling.
type AutoscalingOpts struct {
MinCapacity *int
MaxCapacity *int
CPU *float64
Memory *float64
Requests *float64
ResponseTime *float64
QueueDelay *AutoscalingQueueDelayOpts
}
// AutoscalingQueueDelayOpts holds configuration to scale SQS queues.
type AutoscalingQueueDelayOpts struct {
AcceptableBacklogPerTask int
}
// ExecuteCommandOpts holds configuration that's needed for ECS Execute Command.
type ExecuteCommandOpts struct{}
// StateMachineOpts holds configuration needed for State Machine retries and timeout.
type StateMachineOpts struct {
Timeout *int
Retries *int
}
// PublishOpts holds configuration needed if the service has publishers.
type PublishOpts struct {
Topics []*Topic
}
// Topic holds information needed to render a SNSTopic in a container definition.
type Topic struct {
Name *string
Region string
Partition string
AccountID string
App string
Env string
Svc string
}
// SubscribeOpts holds configuration needed if the service has subscriptions.
type SubscribeOpts struct {
Topics []*TopicSubscription
Queue *SQSQueue
}
// HasTopicQueues returns true if any individual subscription has a dedicated queue.
func (s *SubscribeOpts) HasTopicQueues() bool {
for _, t := range s.Topics {
if t.Queue != nil {
return true
}
}
return false
}
// TopicSubscription holds information needed to render a SNS Topic Subscription in a container definition.
type TopicSubscription struct {
Name *string
Service *string
Queue *SQSQueue
}
// SQSQueue holds information needed to render a SQS Queue in a container definition.
type SQSQueue struct {
Retention *int64
Delay *int64
Timeout *int64
DeadLetter *DeadLetterQueue
}
// DeadLetterQueue holds information needed to render a dead-letter SQS Queue in a container definition.
type DeadLetterQueue struct {
Tries *uint16
}
// NetworkOpts holds AWS networking configuration for the workloads.
type NetworkOpts struct {
AssignPublicIP string
SubnetsType string
SecurityGroups []string
}
// RuntimePlatformOpts holds configuration needed for Platform configuration.
type RuntimePlatformOpts struct {
OS string
Arch string
}
// IsDefault returns true if the platform matches the default docker image platform of "linux/amd64".
func (p RuntimePlatformOpts) IsDefault() bool {
if p.isEmpty() {
return true
}
if p.OS == OSLinux && p.Arch == ArchX86 {
return true
}
return false
}
// Version returns the Fargate platform version based on the selected os family.
func (p RuntimePlatformOpts) Version() string {
for _, os := range osFamiliesForPV100 {
if p.OS == os {
return "1.0.0"
}
}
return "LATEST"
}
func (p RuntimePlatformOpts) isEmpty() bool {
return p.OS == "" && p.Arch == ""
}
// WorkloadOpts holds optional data that can be provided to enable features in a workload stack template.
type WorkloadOpts struct {
// Additional options that are common between **all** workload templates.
Variables map[string]string
Secrets map[string]string
Aliases []string
Tags map[string]string // Used by App Runner workloads to tag App Runner service resources
NestedStack *WorkloadNestedStackOpts // Outputs from nested stacks such as the addons stack.
AddonsExtraParams string // Additional user defined Parameters for the addons stack.
Sidecars []*SidecarOpts
LogConfig *LogConfigOpts
Autoscaling *AutoscalingOpts
CapacityProviders []*CapacityProviderStrategy
DesiredCountOnSpot *int
Storage *StorageOpts
Network NetworkOpts
ExecuteCommand *ExecuteCommandOpts
Platform RuntimePlatformOpts
EntryPoint []string
Command []string
DomainAlias string
DockerLabels map[string]string
DependsOn map[string]string
Publish *PublishOpts
ServiceDiscoveryEndpoint string
HTTPVersion *string
ALBEnabled bool
// Additional options for service templates.
WorkloadType string
HealthCheck *ContainerHealthCheck
HTTPHealthCheck HTTPHealthCheckOpts
DeregistrationDelay *int64
AllowedSourceIps []string
NLB *NetworkLoadBalancer
// Lambda functions.
RulePriorityLambda string
DesiredCountLambda string
EnvControllerLambda string
CredentialsParameter string
BacklogPerTaskCalculatorLambda string
NLBCertValidatorFunctionLambda string
NLBCustomDomainFunctionLambda string
// Additional options for job templates.
ScheduleExpression string
StateMachine *StateMachineOpts
// Additional options for request driven web service templates.
StartCommand *string
EnableHealthCheck bool
// Input needed for the custom resource that adds a custom domain to the service.
Alias *string
ScriptBucketName *string
CustomDomainLambda *string
AWSSDKLayer *string
AppDNSDelegationRole *string
AppDNSName *string
// Additional options for worker service templates.
Subscribe *SubscribeOpts
// List of features to enable for testing that are not yet released.
FeatureFlags []string
}
// ParseLoadBalancedWebService parses a load balanced web service's CloudFormation template
// with the specified data object and returns its content.
func (t *Template) ParseLoadBalancedWebService(data WorkloadOpts) (*Content, error) {
return t.parseSvc(lbWebSvcTplName, data, withSvcParsingFuncs())
}
// ParseRequestDrivenWebService parses a request-driven web service's CloudFormation template
// with the specified data object and returns its content.
func (t *Template) ParseRequestDrivenWebService(data WorkloadOpts) (*Content, error) {
return t.parseSvc(rdWebSvcTplName, data, withSvcParsingFuncs())
}
// ParseBackendService parses a backend service's CloudFormation template with the specified data object and returns its content.
func (t *Template) ParseBackendService(data WorkloadOpts) (*Content, error) {
return t.parseSvc(backendSvcTplName, data, withSvcParsingFuncs())
}
// ParseWorkerService parses a worker service's CloudFormation template with the specified data object and returns its content.
func (t *Template) ParseWorkerService(data WorkloadOpts) (*Content, error) {
return t.parseSvc(workerSvcTplName, data, withSvcParsingFuncs())
}
// ParseScheduledJob parses a scheduled job's Cloudformation Template
func (t *Template) ParseScheduledJob(data WorkloadOpts) (*Content, error) {
return t.parseJob(scheduledJobTplName, data, withSvcParsingFuncs())
}
// parseSvc parses a service's CloudFormation template with the specified data object and returns its content.
func (t *Template) parseSvc(name string, data interface{}, options ...ParseOption) (*Content, error) {
return t.parseWkld(name, servicesDirName, data, options...)
}
// parseJob parses a job's Cloudformation template with the specified data object and returns its content.
func (t *Template) parseJob(name string, data interface{}, options ...ParseOption) (*Content, error) {
return t.parseWkld(name, jobDirName, data, options...)
}
func (t *Template) parseWkld(name, wkldDirName string, data interface{}, options ...ParseOption) (*Content, error) {
tpl, err := t.parse("base", fmt.Sprintf(fmtWkldCFTemplatePath, wkldDirName, name), options...)
if err != nil {
return nil, err
}
for _, templateName := range partialsWorkloadCFTemplateNames {
nestedTpl, err := t.parse(templateName, fmt.Sprintf(fmtWkldPartialsCFTemplatePath, templateName), options...)
if err != nil {
return nil, err
}
_, err = tpl.AddParseTree(templateName, nestedTpl.Tree)
if err != nil {
return nil, fmt.Errorf("add parse tree of %s to base template: %w", templateName, err)
}
}
buf := &bytes.Buffer{}
if err := tpl.Execute(buf, data); err != nil {
return nil, fmt.Errorf("execute template %s with data %v: %w", name, data, err)
}
return &Content{buf}, nil
}
func withSvcParsingFuncs() ParseOption {
return func(t *template.Template) *template.Template {
return t.Funcs(map[string]interface{}{
"toSnakeCase": ToSnakeCaseFunc,
"hasSecrets": hasSecrets,
"fmtSlice": FmtSliceFunc,
"quoteSlice": QuoteSliceFunc,
"randomUUID": randomUUIDFunc,
"jsonMountPoints": generateMountPointJSON,
"jsonSNSTopics": generateSNSJSON,
"jsonQueueURIs": generateQueueURIJSON,
"envControllerParams": envControllerParameters,
"logicalIDSafe": StripNonAlphaNumFunc,
"wordSeries": english.WordSeries,
"pluralWord": english.PluralWord,
"contains": contains,
})
}
}
func hasSecrets(opts WorkloadOpts) bool {
if len(opts.Secrets) > 0 {
return true
}
if opts.NestedStack != nil && (len(opts.NestedStack.SecretOutputs) > 0) {
return true
}
return false
}
func randomUUIDFunc() (string, error) {
id, err := uuid.NewRandom()
if err != nil {
return "", fmt.Errorf("generate random uuid: %w", err)
}
return id.String(), err
}
// envControllerParameters determines which parameters to include in the EnvController template.
func envControllerParameters(o WorkloadOpts) []string {
parameters := []string{}
if o.WorkloadType == "Load Balanced Web Service" {
if o.ALBEnabled {
parameters = append(parameters, "ALBWorkloads,")
}
parameters = append(parameters, "Aliases,") // YAML needs the comma separator; resolved in EnvContr.
}
if o.Network.SubnetsType == PrivateSubnetsPlacement {
parameters = append(parameters, "NATWorkloads,")
}
if o.Storage != nil && o.Storage.requiresEFSCreation() {
parameters = append(parameters, "EFSWorkloads,")
}
return parameters
}
func contains(list []string, s string) bool {
for _, item := range list {
if item == s {
return true
}
}
return false
}
// ARN determines the arn for a topic using the SNSTopic name and account information
func (t Topic) ARN() string {
return fmt.Sprintf(snsARNPattern, t.Partition, t.Region, t.AccountID, t.App, t.Env, t.Svc, aws.StringValue(t.Name))
}
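
As a worked example of how envControllerParameters composes its output, a minimal sketch for a Load Balanced Web Service with an ALB placed in private subnets (the trailing commas are deliberate, as the in-function comment notes):

	opts := WorkloadOpts{
		WorkloadType: "Load Balanced Web Service",
		ALBEnabled:   true,
		Network:      NetworkOpts{SubnetsType: PrivateSubnetsPlacement},
	}
	params := envControllerParameters(opts)
	// params == []string{"ALBWorkloads,", "Aliases,", "NATWorkloads,"}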
| 1 | 20,453 | Should we remove the `Aliases` field as well? | aws-copilot-cli | go |
@@ -88,6 +88,8 @@ public class VoiceSearchWidget extends UIWidget implements WidgetManagerDelegate
mMozillaSpeechService = MozillaSpeechService.getInstance();
mMozillaSpeechService.setProductTag("fxr");
+ mMozillaSpeechService.storeSamples(false);
+ mMozillaSpeechService.storeTranscriptions(false);
mVoiceSearchText1 = findViewById(R.id.voiceSearchText1);
mVoiceSearchText2 = findViewById(R.id.voiceSearchText2); | 1 | package org.mozilla.vrbrowser.ui.widgets;
import android.Manifest;
import android.app.Activity;
import android.app.Application;
import android.content.Context;
import android.content.pm.PackageManager;
import android.graphics.drawable.ClipDrawable;
import android.graphics.drawable.Drawable;
import android.graphics.drawable.LayerDrawable;
import android.os.Bundle;
import android.support.annotation.IdRes;
import android.support.v4.app.ActivityCompat;
import android.util.AttributeSet;
import android.util.Log;
import android.view.Gravity;
import android.view.View;
import android.view.animation.Animation;
import android.view.animation.LinearInterpolator;
import android.view.animation.RotateAnimation;
import android.widget.ImageView;
import android.widget.TextView;
import com.mozilla.speechlibrary.ISpeechRecognitionListener;
import com.mozilla.speechlibrary.MozillaSpeechService;
import com.mozilla.speechlibrary.STTResult;
import org.mozilla.vrbrowser.R;
import org.mozilla.vrbrowser.audio.AudioEngine;
import org.mozilla.vrbrowser.ui.views.UIButton;
import static org.mozilla.gecko.GeckoAppShell.getApplicationContext;
public class VoiceSearchWidget extends UIWidget implements WidgetManagerDelegate.PermissionListener,
Application.ActivityLifecycleCallbacks, WidgetManagerDelegate.FocusChangeListener {
private static final String LOGTAG = "VRB";
private static final int VOICESEARCH_AUDIO_REQUEST_CODE = 7455;
private static final int ANIMATION_DURATION = 1000;
private static int MAX_CLIPPING = 10000;
private static int MAX_DB = 130;
private static int MIN_DB = 50;
public interface VoiceSearchDelegate {
        void OnVoiceSearchResult(String transcription, float confidence);
void OnVoiceSearchCanceled();
void OnVoiceSearchError();
}
private MozillaSpeechService mMozillaSpeechService;
private VoiceSearchDelegate mDelegate;
private ImageView mVoiceSearchInput;
private ImageView mVoiceSearchSearching;
private Drawable mVoiceInputBackgroundDrawable;
private ClipDrawable mVoiceInputClipDrawable;
private int mVoiceInputGravity;
private TextView mVoiceSearchText1;
private TextView mVoiceSearchText2;
private TextView mVoiceSearchText3;
private RotateAnimation mSearchingAnimation;
private boolean mIsSpeechRecognitionRunning = false;
private boolean mWasSpeechRecognitionRunning = false;
private AudioEngine mAudio;
public VoiceSearchWidget(Context aContext) {
super(aContext);
initialize(aContext);
}
public VoiceSearchWidget(Context aContext, AttributeSet aAttrs) {
super(aContext, aAttrs);
initialize(aContext);
}
public VoiceSearchWidget(Context aContext, AttributeSet aAttrs, int aDefStyle) {
super(aContext, aAttrs, aDefStyle);
initialize(aContext);
}
private void initialize(Context aContext) {
inflate(aContext, R.layout.voice_search_dialog, this);
mAudio = AudioEngine.fromContext(aContext);
mWidgetManager.addFocusChangeListener(this);
mWidgetManager.addPermissionListener(this);
mMozillaSpeechService = MozillaSpeechService.getInstance();
mMozillaSpeechService.setProductTag("fxr");
mVoiceSearchText1 = findViewById(R.id.voiceSearchText1);
mVoiceSearchText2 = findViewById(R.id.voiceSearchText2);
mVoiceSearchText3 = findViewById(R.id.voiceSearchText3);
mVoiceInputGravity = 0;
mVoiceInputBackgroundDrawable = getResources().getDrawable(R.drawable.ic_voice_search_volume_input_black, getContext().getTheme());
mVoiceInputClipDrawable = new ClipDrawable(getContext().getDrawable(R.drawable.ic_voice_search_volume_input_clip), Gravity.START, ClipDrawable.HORIZONTAL);
Drawable[] layers = new Drawable[] {mVoiceInputBackgroundDrawable, mVoiceInputClipDrawable };
mVoiceSearchInput = findViewById(R.id.voiceSearchInput);
mVoiceSearchInput.setImageDrawable(new LayerDrawable(layers));
mVoiceInputClipDrawable.setLevel(mVoiceInputGravity);
mSearchingAnimation = new RotateAnimation(0, 360f,
Animation.RELATIVE_TO_SELF, 0.5f,
Animation.RELATIVE_TO_SELF, 0.5f);
mSearchingAnimation.setInterpolator(new LinearInterpolator());
mSearchingAnimation.setDuration(ANIMATION_DURATION);
mSearchingAnimation.setRepeatCount(Animation.INFINITE);
mVoiceSearchSearching = findViewById(R.id.voiceSearchSearching);
UIButton backButton = findViewById(R.id.backButton);
backButton.setOnClickListener(view -> {
if (mAudio != null) {
mAudio.playSound(AudioEngine.Sound.CLICK);
}
onDismiss();
});
((Application)getApplicationContext()).registerActivityLifecycleCallbacks(this);
}
public void setDelegate(VoiceSearchDelegate delegate) {
mDelegate = delegate;
}
@Override
public void releaseWidget() {
mWidgetManager.removeFocusChangeListener(this);
mWidgetManager.removePermissionListener(this);
mMozillaSpeechService.removeListener(mVoiceSearchListener);
((Application)getApplicationContext()).unregisterActivityLifecycleCallbacks(this);
super.releaseWidget();
}
@Override
protected void initializeWidgetPlacement(WidgetPlacement aPlacement) {
aPlacement.visible = false;
aPlacement.width = WidgetPlacement.dpDimension(getContext(), R.dimen.voice_search_width);
aPlacement.height = WidgetPlacement.dpDimension(getContext(), R.dimen.voice_search_height);
aPlacement.parentAnchorX = 0.5f;
aPlacement.parentAnchorY = 0.5f;
aPlacement.anchorX = 0.5f;
aPlacement.anchorY = 0.5f;
aPlacement.translationY = WidgetPlacement.unitFromMeters(getContext(), R.dimen.restart_dialog_world_y);
aPlacement.translationZ = WidgetPlacement.unitFromMeters(getContext(), R.dimen.restart_dialog_world_z);
}
private ISpeechRecognitionListener mVoiceSearchListener = new ISpeechRecognitionListener() {
public void onSpeechStatusChanged(final MozillaSpeechService.SpeechState aState, final Object aPayload){
((Activity)getContext()).runOnUiThread(new Runnable() {
@Override
public void run() {
switch (aState) {
case DECODING:
// Handle when the speech object changes to decoding state
Log.d(LOGTAG, "===> DECODING");
setDecodingState();
break;
case MIC_ACTIVITY:
// Captures the activity from the microphone
Log.d(LOGTAG, "===> MIC_ACTIVITY");
double db = (double)aPayload * -1; // the higher the value, quieter the user/environment is
db = db == Double.POSITIVE_INFINITY ? MAX_DB : db;
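                            // Map db in [MIN_DB, MAX_DB] linearly onto [MAX_CLIPPING, 0]: quieter
                            // input (a larger db value here) yields a lower clip level, e.g.
                            // db = 90 -> level = 10000 - ((90 - 50) / 80) * 10000 = 5000.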
int level = (int)(MAX_CLIPPING - (((db - MIN_DB) / (MAX_DB - MIN_DB)) * MAX_CLIPPING));
Log.d(LOGTAG, "===> db: " + db);
Log.d(LOGTAG, "===> level " + level);
mVoiceInputClipDrawable.setLevel(level);
break;
case STT_RESULT:
// When the api finished processing and returned a hypothesis
Log.d(LOGTAG, "===> STT_RESULT");
String transcription = ((STTResult)aPayload).mTranscription;
float confidence = ((STTResult)aPayload).mConfidence;
if (mDelegate != null)
mDelegate.OnVoiceSearchResult(transcription, confidence);
hide();
break;
case START_LISTEN:
// Handle when the api successfully opened the microphone and started listening
Log.d(LOGTAG, "===> START_LISTEN");
break;
case NO_VOICE:
// Handle when the api didn't detect any voice
Log.d(LOGTAG, "===> NO_VOICE");
setResultState();
break;
case CANCELED:
// Handle when a cancelation was fully executed
Log.d(LOGTAG, "===> CANCELED");
setResultState();
if (mDelegate != null)
mDelegate.OnVoiceSearchCanceled();
break;
case ERROR:
Log.d(LOGTAG, "===> ERROR: " + aPayload.toString());
setResultState();
// Handle when any error occurred
if (mDelegate != null)
mDelegate.OnVoiceSearchError();
break;
default:
break;
}
}
});
}
};
public void startVoiceSearch() {
if (ActivityCompat.checkSelfPermission(getApplicationContext(), Manifest.permission.RECORD_AUDIO)
!= PackageManager.PERMISSION_GRANTED) {
ActivityCompat.requestPermissions((Activity)getContext(), new String[]{Manifest.permission.RECORD_AUDIO},
VOICESEARCH_AUDIO_REQUEST_CODE);
} else {
mMozillaSpeechService.setLanguage("en-us");
mMozillaSpeechService.start(getApplicationContext());
mIsSpeechRecognitionRunning = true;
}
}
public void stopVoiceSearch() {
try {
mMozillaSpeechService.cancel();
mIsSpeechRecognitionRunning = false;
} catch (Exception e) {
Log.d(LOGTAG, e.getLocalizedMessage());
e.printStackTrace();
}
}
@Override
public void onRequestPermissionsResult(int requestCode, String[] permissions, int[] grantResults) {
boolean granted = false;
if (requestCode == VOICESEARCH_AUDIO_REQUEST_CODE) {
for (int result: grantResults) {
if (result == PackageManager.PERMISSION_GRANTED) {
granted = true;
break;
}
}
if (granted) {
startVoiceSearch();
} else {
setPermissionNotGranted();
}
}
}
@Override
public void show() {
super.show();
setStartListeningState();
mMozillaSpeechService.addListener(mVoiceSearchListener);
startVoiceSearch();
}
@Override
public void hide() {
super.hide();
mMozillaSpeechService.removeListener(mVoiceSearchListener);
stopVoiceSearch();
}
private void setStartListeningState() {
mVoiceSearchText1.setText(R.string.voice_search_start);
mVoiceSearchText1.setVisibility(View.VISIBLE);
mVoiceSearchText2.setVisibility(View.GONE);
mVoiceSearchText3.setVisibility(View.VISIBLE);
mVoiceSearchInput.setVisibility(View.VISIBLE);
mVoiceSearchSearching.clearAnimation();
mVoiceSearchSearching.setVisibility(View.INVISIBLE);
}
private void setDecodingState() {
mVoiceSearchText1.setText(R.string.voice_search_decoding);
mVoiceSearchText1.setVisibility(View.VISIBLE);
mVoiceSearchText2.setVisibility(View.GONE);
mVoiceSearchText3.setVisibility(View.INVISIBLE);
mVoiceSearchInput.setVisibility(View.INVISIBLE);
mVoiceSearchSearching.startAnimation(mSearchingAnimation);
mVoiceSearchSearching.setVisibility(View.VISIBLE);
}
private void setResultState() {
mVoiceSearchText1.setText(R.string.voice_search_error);
mVoiceSearchText1.setVisibility(View.VISIBLE);
mVoiceSearchText2.setText(R.string.voice_search_try_again);
mVoiceSearchText2.setVisibility(View.VISIBLE);
mVoiceSearchText3.setVisibility(View.VISIBLE);
mVoiceSearchInput.setVisibility(View.VISIBLE);
mVoiceSearchSearching.clearAnimation();
mVoiceSearchSearching.setVisibility(View.INVISIBLE);
stopVoiceSearch();
startVoiceSearch();
}
private void setPermissionNotGranted() {
mVoiceSearchText1.setText(R.string.voice_search_permission_after_decline);
mVoiceSearchText1.setVisibility(View.VISIBLE);
mVoiceSearchText2.setVisibility(View.GONE);
mVoiceSearchText3.setVisibility(View.INVISIBLE);
mVoiceSearchInput.setVisibility(View.INVISIBLE);
mVoiceSearchSearching.clearAnimation();
mVoiceSearchSearching.setVisibility(View.INVISIBLE);
}
@Override
public void onActivityCreated(Activity activity, Bundle bundle) {
}
@Override
public void onActivityStarted(Activity activity) {
}
@Override
public void onActivityResumed(Activity activity) {
if (mWasSpeechRecognitionRunning) {
mMozillaSpeechService.addListener(mVoiceSearchListener);
startVoiceSearch();
}
}
@Override
public void onActivityPaused(Activity activity) {
mWasSpeechRecognitionRunning = mIsSpeechRecognitionRunning;
if (mIsSpeechRecognitionRunning) {
mMozillaSpeechService.removeListener(mVoiceSearchListener);
stopVoiceSearch();
}
}
@Override
public void onActivityStopped(Activity activity) {
}
@Override
public void onActivitySaveInstanceState(Activity activity, Bundle bundle) {
}
@Override
public void onActivityDestroyed(Activity activity) {
}
// WidgetManagerDelegate.FocusChangeListener
@Override
public void onGlobalFocusChanged(View oldFocus, View newFocus) {
boolean shouldHide = true;
if (newFocus != null) {
@IdRes Integer view_id = (Integer) newFocus.getTag(R.string.view_id_tag);
if (view_id != null && view_id == R.id.microphoneButton) {
shouldHide = false;
}
}
if (isVisible() && shouldHide) {
hide();
}
}
}
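
On the review question attached to this record: the two setters added in the patch read as privacy opt-outs, telling the speech library not to retain the captured audio samples or the recognized transcriptions for later use. That reading is inferred from the method names, not from library documentation. A minimal sketch of the initialization with both opt-outs, mirroring the patch:

    MozillaSpeechService speech = MozillaSpeechService.getInstance();
    speech.setProductTag("fxr");
    // Opt out of retaining captured audio and recognized text (assumed semantics).
    speech.storeSamples(false);
    speech.storeTranscriptions(false);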
| 1 | 6,744 | what's the effect of turning these two off? is there an issue on file for context? | MozillaReality-FirefoxReality | java |
@@ -35,9 +35,10 @@ type CLITestType string
// List all test types here
const (
- Wrapper CLITestType = "1 wrapper"
- GcloudProdWrapperLatest CLITestType = "2 gcloud-prod wrapper-latest"
- GcloudLatestWrapperLatest CLITestType = "3 gcloud-latest wrapper-latest"
+ Wrapper CLITestType = "1 wrapper"
+ GcloudBetaProdWrapperLatest CLITestType = "2 gcloud-beta-prod wrapper-latest"
+ GcloudBetaLatestWrapperLatest CLITestType = "3 gcloud-beta-latest wrapper-latest"
+ GcloudGaLatestWrapperRelease CLITestType = "4 gcloud-ga-latest wrapper-release"
)
var ( | 1 | // Copyright 2020 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package utils contains e2e tests utils for cli tools e2e tests
package utils
import (
"context"
"fmt"
"log"
"os"
"os/exec"
"regexp"
"sort"
"strings"
"sync"
"github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/junitxml"
"github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/test_config"
)
// CLITestType defines which type of test is going to be executed
type CLITestType string
// List all test types here
const (
Wrapper CLITestType = "1 wrapper"
GcloudProdWrapperLatest CLITestType = "2 gcloud-prod wrapper-latest"
GcloudLatestWrapperLatest CLITestType = "3 gcloud-latest wrapper-latest"
)
var (
gcloudUpdateLock = sync.Mutex{}
)
// CLITestSuite executes given test suite.
func CLITestSuite(ctx context.Context, tswg *sync.WaitGroup, testSuites chan *junitxml.TestSuite,
logger *log.Logger, testSuiteRegex, testCaseRegex *regexp.Regexp,
testProjectConfig *testconfig.Project, testSuiteName string, testsMap map[CLITestType]map[*junitxml.TestCase]func(
context.Context, *junitxml.TestCase, *log.Logger, *testconfig.Project, CLITestType)) {
defer tswg.Done()
if testSuiteRegex != nil && !testSuiteRegex.MatchString(testSuiteName) {
return
}
testSuite := junitxml.NewTestSuite(testSuiteName)
defer testSuite.Finish(testSuites)
logger.Printf("Running CLITestSuite %q", testSuite.Name)
tests := runTestCases(ctx, logger, testCaseRegex, testProjectConfig, testSuite.Name, testsMap)
for ret := range tests {
testSuite.TestCase = append(testSuite.TestCase, ret)
}
logger.Printf("Finished CLITestSuite %q", testSuite.Name)
}
func runTestCases(ctx context.Context, logger *log.Logger, regex *regexp.Regexp,
testProjectConfig *testconfig.Project, testSuiteName string, testsMap map[CLITestType]map[*junitxml.TestCase]func(
context.Context, *junitxml.TestCase, *log.Logger, *testconfig.Project, CLITestType)) chan *junitxml.TestCase {
tests := make(chan *junitxml.TestCase)
var ttwg sync.WaitGroup
ttwg.Add(len(testsMap))
tts := make([]string, 0, len(testsMap))
for tt := range testsMap {
tts = append(tts, string(tt))
}
sort.Strings(tts)
go func() {
for _, ttStr := range tts {
tt := CLITestType(ttStr)
m := testsMap[tt]
logger.Printf("=== Running CLITestSuite %v for test type %v ===", testSuiteName, tt)
var wg sync.WaitGroup
for tc, f := range m {
wg.Add(1)
go func(ctx context.Context, wg *sync.WaitGroup, tc *junitxml.TestCase, tt CLITestType, f func(
context.Context, *junitxml.TestCase, *log.Logger, *testconfig.Project, CLITestType)) {
defer wg.Done()
if tc.FilterTestCase(regex) {
tc.Finish(tests)
} else {
defer logger.Printf("TestCase %s.%q finished in %fs", tc.Classname, tc.Name, tc.Time)
defer tc.Finish(tests)
logger.Printf("Running TestCase %s.%q", tc.Classname, tc.Name)
f(ctx, tc, logger, testProjectConfig, tt)
}
}(ctx, &wg, tc, tt, f)
}
wg.Wait()
ttwg.Done()
logger.Printf("=== Finished running CLITestSuite %v for test type %v ===", testSuiteName, tt)
}
}()
go func() {
ttwg.Wait()
close(tests)
}()
return tests
}
// RunCliTool runs a cli tool with given args
func RunCliTool(logger *log.Logger, testCase *junitxml.TestCase, cmdString string, args []string) error {
prefix := "Test Env"
if testCase != nil {
prefix = testCase.Name
}
logger.Printf("[%v] Running command: '%s %s'", prefix, cmdString, strings.Join(args, " "))
cmd := exec.Command(cmdString, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
// RunTestCommand runs given test command
func RunTestCommand(cmd string, args []string, logger *log.Logger, testCase *junitxml.TestCase) bool {
if err := RunCliTool(logger, testCase, cmd, args); err != nil {
Failure(testCase, logger, fmt.Sprintf("Error running cmd: %v\n", err))
return false
}
return true
}
// RunTestCommandIgnoringError runs given test command. The test case won't be marked as fail even error happens.
func RunTestCommandIgnoringError(cmd string, args []string, logger *log.Logger, testCase *junitxml.TestCase) bool {
if err := RunCliTool(logger, testCase, cmd, args); err != nil {
logger.Printf("[%v] %v", testCase.Name, fmt.Sprintf("Error running cmd: %v\n", err))
return false
}
return true
}
func runCliToolAsync(logger *log.Logger, testCase *junitxml.TestCase, cmdString string, args []string) (*exec.Cmd, error) {
logger.Printf("[%v] Running command: '%s %s'", testCase.Name, cmdString, strings.Join(args, " "))
cmd := exec.Command(cmdString, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Start()
return cmd, err
}
// RunTestCommandAsync runs the given test command asynchronously.
func RunTestCommandAsync(cmd string, args []string, logger *log.Logger, testCase *junitxml.TestCase) *exec.Cmd {
cmdPtr, err := runCliToolAsync(logger, testCase, cmd, args)
if err != nil {
Failure(testCase, logger, fmt.Sprintf("Error starting cmd: %v\n", err))
return nil
}
return cmdPtr
}
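// Illustrative usage (a sketch; "my-daemon" and its flag are hypothetical):
//
//	proc := RunTestCommandAsync("my-daemon", []string{"--serve"}, logger, testCase)
//	if proc != nil {
//		defer proc.Wait() // reap the child process once the test finishes
//	}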
// GcloudAuth runs "gcloud auth activate-service-account" with the test service account's creds.
func GcloudAuth(logger *log.Logger, testCase *junitxml.TestCase) bool {
// This file exists in test env. For local testing, download a creds file from project
// compute-image-tools-test.
credsPath := "/etc/compute-image-tools-test-service-account/creds.json"
cmd := "gcloud"
args := []string{"auth", "activate-service-account", "--key-file=" + credsPath}
if err := RunCliTool(logger, testCase, cmd, args); err != nil {
Failure(testCase, logger, fmt.Sprintf("Error running cmd: %v\n", err))
return false
}
return true
}
// GcloudUpdate runs "gcloud components update" to pull either the latest or the prod version.
func GcloudUpdate(logger *log.Logger, testCase *junitxml.TestCase, latest bool) bool {
gcloudUpdateLock.Lock()
defer gcloudUpdateLock.Unlock()
// auth is required for gcloud updates
if !GcloudAuth(logger, testCase) {
return false
}
cmd := "gcloud"
if latest {
args := []string{"components", "repositories", "add",
"https://storage.googleapis.com/cloud-sdk-testing/ci/staging/components-2.json", "--quiet"}
if err := RunCliTool(logger, testCase, cmd, args); err != nil {
logger.Printf("Error running cmd: %v\n", err)
testCase.WriteFailure("Error running cmd: %v", err)
return false
}
} else {
args := []string{"components", "repositories", "remove", "--all"}
if err := RunCliTool(logger, testCase, cmd, args); err != nil {
logger.Printf("Error running cmd: %v\n", err)
testCase.WriteFailure("Error running cmd: %v", err)
return false
}
}
args := []string{"components", "update", "--quiet"}
if err := RunCliTool(logger, testCase, cmd, args); err != nil {
logger.Printf("Error running cmd: %v\n", err)
testCase.WriteFailure("Error running cmd: %v", err)
return false
}
	// An additional auth is required when the update came through a different repository.
if !GcloudAuth(logger, testCase) {
return false
}
return true
}
// RunTestForTestType runs the test for the given test type.
func RunTestForTestType(cmd string, args []string, testType CLITestType, logger *log.Logger, testCase *junitxml.TestCase) bool {
switch testType {
case Wrapper:
if !RunTestCommand(cmd, args, logger, testCase) {
return false
}
case GcloudProdWrapperLatest:
if !GcloudUpdate(logger, testCase, false) {
return false
}
if !RunTestCommand(cmd, args, logger, testCase) {
return false
}
case GcloudLatestWrapperLatest:
if !GcloudUpdate(logger, testCase, true) {
return false
}
if !RunTestCommand(cmd, args, logger, testCase) {
return false
}
}
return true
}
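// exampleCLITest is a minimal illustrative sketch, not part of the original
// framework: a test function with the signature stored as testsMap values.
// The tool name "image-import-wrapper" and its args are hypothetical.
func exampleCLITest(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger,
	testProjectConfig *testconfig.Project, testType CLITestType) {
	args := []string{"--help"} // hypothetical args for the tool under test
	RunTestForTestType("image-import-wrapper", args, testType, logger, testCase)
}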
// Failure logs failure message to both test case output and logger.
func Failure(testCase *junitxml.TestCase, logger *log.Logger, msg string) {
prefix := "Test Env"
if testCase != nil {
prefix = testCase.Name
testCase.WriteFailure(msg)
}
logger.Printf("[%v] %v", prefix, msg)
}
// ContainsSubString checks whether the string slice contains a substring anywhere.
func ContainsSubString(strs []string, s string) bool {
for _, str := range strs {
if strings.Contains(str, s) {
return true
}
}
return false
}
 | 1 | 12,261 | The format like gcloud-beta-prod is a bit hard to understand. Maybe find a clearer way to describe it. | GoogleCloudPlatform-compute-image-tools | go |
@@ -20,7 +20,7 @@ namespace OpenTelemetry.Tags
/// <summary>
/// Collection of tags representing the tags context.
/// </summary>
- public interface ITagContext : IEnumerable<Tag>
+ public interface ITagContext : IEnumerable<DistributedContextEntry>
{
}
} | 1 | // <copyright file="ITagContext.cs" company="OpenTelemetry Authors">
// Copyright 2018, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System.Collections.Generic;
namespace OpenTelemetry.Tags
{
/// <summary>
/// Collection of tags representing the tags context.
/// </summary>
public interface ITagContext : IEnumerable<Tag>
{
}
}
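// A possible follow-up to the DistributedContextEntry change in the patch above
// would be renaming the interface itself. A hypothetical sketch only;
// IDistributedContext is not a confirmed API name:
//
//     public interface IDistributedContext : IEnumerable<DistributedContextEntry>
//     {
//     }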
 | 1 | 12,541 | Are we renaming this too in a follow-up PR? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -179,7 +179,8 @@ class Preference extends Model
public function getLocaleOptions()
{
$localeOptions = [
- 'be' => [Lang::get('system::lang.locale.be'), 'flag-by'],
+ 'ar' => [Lang::get('system::lang.locale.ar'), 'flag-sa'],
+ 'be' => [Lang::get('system::lang.locale.be'), 'flag-by'],
'cs' => [Lang::get('system::lang.locale.cs'), 'flag-cz'],
'da' => [Lang::get('system::lang.locale.da'), 'flag-dk'],
'en' => [Lang::get('system::lang.locale.en'), 'flag-us'], | 1 | <?php namespace Backend\Models;
use App;
use Lang;
use Model;
use Config;
use Session;
use BackendAuth;
use DirectoryIterator;
use DateTime;
use DateTimeZone;
use Carbon\Carbon;
/**
* Backend preferences for the backend user
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class Preference extends Model
{
use \October\Rain\Database\Traits\Validation;
const DEFAULT_THEME = 'twilight';
/**
* @var array Behaviors implemented by this model.
*/
public $implement = [
\Backend\Behaviors\UserPreferencesModel::class
];
/**
* @var string Unique code
*/
public $settingsCode = 'backend::backend.preferences';
/**
     * @var mixed Settings form field definitions
*/
public $settingsFields = 'fields.yaml';
/**
* @var array Validation rules
*/
public $rules = [];
/**
* Initialize the seed data for this model. This only executes when the
* model is first created or reset to default.
* @return void
*/
public function initSettingsData()
{
$config = App::make('config');
$this->locale = $config->get('app.locale', 'en');
$this->fallback_locale = $this->getFallbackLocale($this->locale);
$this->timezone = $config->get('cms.backendTimezone', $config->get('app.timezone'));
$this->editor_font_size = $config->get('editor.font_size', 12);
$this->editor_word_wrap = $config->get('editor.word_wrap', 'fluid');
$this->editor_code_folding = $config->get('editor.code_folding', 'manual');
$this->editor_tab_size = $config->get('editor.tab_size', 4);
$this->editor_theme = $config->get('editor.theme', static::DEFAULT_THEME);
$this->editor_show_invisibles = $config->get('editor.show_invisibles', false);
$this->editor_highlight_active_line = $config->get('editor.highlight_active_line', true);
$this->editor_use_hard_tabs = $config->get('editor.use_hard_tabs', false);
$this->editor_show_gutter = $config->get('editor.show_gutter', true);
$this->editor_auto_closing = $config->get('editor.auto_closing', false);
$this->editor_autocompletion = $config->get('editor.editor_autocompletion', 'manual');
$this->editor_enable_snippets = $config->get('editor.enable_snippets', false);
$this->editor_display_indent_guides = $config->get('editor.display_indent_guides', false);
$this->editor_show_print_margin = $config->get('editor.show_print_margin', false);
}
/**
* Set the application's locale based on the user preference.
* @return void
*/
public static function setAppLocale()
{
if (Session::has('locale')) {
App::setLocale(Session::get('locale'));
}
elseif (
($user = BackendAuth::getUser()) &&
($locale = static::get('locale'))
) {
Session::put('locale', $locale);
App::setLocale($locale);
}
}
/**
* Same as setAppLocale except for the fallback definition.
* @return void
*/
public static function setAppFallbackLocale()
{
if (Session::has('fallback_locale')) {
Lang::setFallback(Session::get('fallback_locale'));
}
elseif (
($user = BackendAuth::getUser()) &&
($locale = static::get('fallback_locale'))
) {
Session::put('fallback_locale', $locale);
Lang::setFallback($locale);
}
}
//
// Events
//
public function beforeValidate()
{
$this->fallback_locale = $this->getFallbackLocale($this->locale);
}
public function afterSave()
{
Session::put('locale', $this->locale);
Session::put('fallback_locale', $this->fallback_locale);
}
//
// Utils
//
/**
* Called when this model is reset to default by the user.
* @return void
*/
public function resetDefault()
{
parent::resetDefault();
Session::forget('locale');
Session::forget('fallback_locale');
}
/**
* Overrides the config with the user's preference.
* @return void
*/
public static function applyConfigValues()
{
$settings = self::instance();
Config::set('app.locale', $settings->locale);
Config::set('app.fallback_locale', $settings->fallback_locale);
}
//
// Getters
//
/**
* Attempt to extract the language from the locale,
* otherwise use the configuration.
* @return string
*/
protected function getFallbackLocale($locale)
{
if ($position = strpos($locale, '-')) {
$target = substr($locale, 0, $position);
$available = $this->getLocaleOptions();
if (isset($available[$target])) {
return $target;
}
}
return Config::get('app.fallback_locale');
}
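    // Worked example (illustrative): getFallbackLocale('en-gb') returns 'en',
    // since 'en' is a key in getLocaleOptions(); getFallbackLocale('pt-br')
    // falls through to the 'app.fallback_locale' config value, since bare
    // 'pt' is not among the options.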
/**
* Returns available options for the "locale" attribute.
* @return array
*/
public function getLocaleOptions()
{
$localeOptions = [
'be' => [Lang::get('system::lang.locale.be'), 'flag-by'],
'cs' => [Lang::get('system::lang.locale.cs'), 'flag-cz'],
'da' => [Lang::get('system::lang.locale.da'), 'flag-dk'],
'en' => [Lang::get('system::lang.locale.en'), 'flag-us'],
'en-au' => [Lang::get('system::lang.locale.en-au'), 'flag-au'],
'en-ca' => [Lang::get('system::lang.locale.en-ca'), 'flag-ca'],
'en-gb' => [Lang::get('system::lang.locale.en-gb'), 'flag-gb'],
'et' => [Lang::get('system::lang.locale.et'), 'flag-ee'],
'de' => [Lang::get('system::lang.locale.de'), 'flag-de'],
'es' => [Lang::get('system::lang.locale.es'), 'flag-es'],
'es-ar' => [Lang::get('system::lang.locale.es-ar'), 'flag-ar'],
'fa' => [Lang::get('system::lang.locale.fa'), 'flag-ir'],
'fr' => [Lang::get('system::lang.locale.fr'), 'flag-fr'],
'fr-ca' => [Lang::get('system::lang.locale.fr-ca'), 'flag-ca'],
'hu' => [Lang::get('system::lang.locale.hu'), 'flag-hu'],
'id' => [Lang::get('system::lang.locale.id'), 'flag-id'],
'it' => [Lang::get('system::lang.locale.it'), 'flag-it'],
'ja' => [Lang::get('system::lang.locale.ja'), 'flag-jp'],
'kr' => [Lang::get('system::lang.locale.kr'), 'flag-kr'],
'lt' => [Lang::get('system::lang.locale.lt'), 'flag-lt'],
'lv' => [Lang::get('system::lang.locale.lv'), 'flag-lv'],
'nl' => [Lang::get('system::lang.locale.nl'), 'flag-nl'],
'pt-br' => [Lang::get('system::lang.locale.pt-br'), 'flag-br'],
'pt-pt' => [Lang::get('system::lang.locale.pt-pt'), 'flag-pt'],
'ro' => [Lang::get('system::lang.locale.ro'), 'flag-ro'],
'ru' => [Lang::get('system::lang.locale.ru'), 'flag-ru'],
'fi' => [Lang::get('system::lang.locale.fi'), 'flag-fi'],
'sv' => [Lang::get('system::lang.locale.sv'), 'flag-se'],
'tr' => [Lang::get('system::lang.locale.tr'), 'flag-tr'],
'uk' => [Lang::get('system::lang.locale.uk'), 'flag-ua'],
'pl' => [Lang::get('system::lang.locale.pl'), 'flag-pl'],
'sk' => [Lang::get('system::lang.locale.sk'), 'flag-sk'],
'zh-cn' => [Lang::get('system::lang.locale.zh-cn'), 'flag-cn'],
'zh-tw' => [Lang::get('system::lang.locale.zh-tw'), 'flag-tw'],
'nb-no' => [Lang::get('system::lang.locale.nb-no'), 'flag-no'],
'el' => [Lang::get('system::lang.locale.el'), 'flag-gr'],
];
$locales = Config::get('app.localeOptions', $localeOptions);
// Sort locales alphabetically
asort($locales);
return $locales;
}
/**
* Returns all available timezone options.
* @return array
*/
public function getTimezoneOptions()
{
$timezoneIdentifiers = DateTimeZone::listIdentifiers();
$utcTime = new DateTime('now', new DateTimeZone('UTC'));
$tempTimezones = [];
foreach ($timezoneIdentifiers as $timezoneIdentifier) {
$currentTimezone = new DateTimeZone($timezoneIdentifier);
$tempTimezones[] = [
'offset' => (int) $currentTimezone->getOffset($utcTime),
'identifier' => $timezoneIdentifier
];
}
// Sort the array by offset, identifier ascending
usort($tempTimezones, function ($a, $b) {
return $a['offset'] === $b['offset']
? strcmp($a['identifier'], $b['identifier'])
: $a['offset'] - $b['offset'];
});
$timezoneList = [];
foreach ($tempTimezones as $tz) {
$sign = $tz['offset'] > 0 ? '+' : '-';
$offset = gmdate('H:i', abs($tz['offset']));
$timezoneList[$tz['identifier']] = '(UTC ' . $sign . $offset . ') ' . $tz['identifier'];
}
return $timezoneList;
}
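    // Example entry (illustrative; the offset reflects whatever DST state is
    // in effect when the list is built):
    //   'Europe/Paris' => '(UTC +01:00) Europe/Paris'
    // A zero offset renders with a '-' sign because of the sign test above,
    // e.g. '(UTC -00:00) UTC'.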
/**
* Returns the theme options for the backend editor.
* @return array
*/
public function getEditorThemeOptions()
{
$themeDir = new DirectoryIterator("modules/backend/formwidgets/codeeditor/assets/vendor/ace/");
$themes = [];
// Iterate through the themes
foreach ($themeDir as $node) {
// If this file is a theme (starting by "theme-")
if (!$node->isDir() && substr($node->getFileName(), 0, 6) == 'theme-') {
                // Remove the theme- prefix and the .js suffix, create a user-friendly, capitalized name
$themeId = substr($node->getFileName(), 6, -3);
$themeName = ucwords(str_replace("_", " ", $themeId));
// Add the values to the themes array
if ($themeId != static::DEFAULT_THEME) {
$themes[$themeId] = $themeName;
}
}
}
        // Sort the themes alphabetically, and prepend the default theme
asort($themes);
return [static::DEFAULT_THEME => ucwords(static::DEFAULT_THEME)] + $themes;
}
}
| 1 | 12,898 | The indentation on this is off by one space to the left | octobercms-october | php |
@@ -52,7 +52,9 @@ static VkImageSubresourceRange RangeFromLayers(const VkImageSubresourceLayers &s
static VkImageSubresourceRange MakeImageFullRange(const VkImageCreateInfo &create_info) {
const auto format = create_info.format;
VkImageSubresourceRange init_range{0, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS};
- if (FormatIsColor(format) || FormatIsMultiplane(format)) {
+ const VkExternalFormatANDROID *pExternalFormatANDROID = lvl_find_in_chain<VkExternalFormatANDROID>(&create_info);
+ bool isExternalFormatConversion = (pExternalFormatANDROID != nullptr && pExternalFormatANDROID->externalFormat != 0);
+ if (FormatIsColor(format) || FormatIsMultiplane(format) || isExternalFormatConversion) {
init_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Normalization will expand this for multiplane
} else {
init_range.aspectMask = | 1 | /* Copyright (c) 2015-2020 The Khronos Group Inc.
* Copyright (c) 2015-2020 Valve Corporation
* Copyright (c) 2015-2020 LunarG, Inc.
* Copyright (C) 2015-2020 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <[email protected]>
* Author: Dave Houlton <[email protected]>
* Shannon McPherson <[email protected]>
*/
#include <cmath>
#include <set>
#include <sstream>
#include <string>
#include "vk_enum_string_helper.h"
#include "vk_format_utils.h"
#include "vk_layer_data.h"
#include "vk_layer_utils.h"
#include "vk_layer_logging.h"
#include "vk_typemap_helper.h"
#include "chassis.h"
#include "core_validation.h"
#include "shader_validation.h"
#include "descriptor_sets.h"
#include "buffer_validation.h"
// Transfer VkImageSubresourceLayers into VkImageSubresourceRange struct
static VkImageSubresourceRange RangeFromLayers(const VkImageSubresourceLayers &subresource_layers) {
VkImageSubresourceRange subresource_range;
subresource_range.aspectMask = subresource_layers.aspectMask;
subresource_range.baseArrayLayer = subresource_layers.baseArrayLayer;
subresource_range.layerCount = subresource_layers.layerCount;
subresource_range.baseMipLevel = subresource_layers.mipLevel;
subresource_range.levelCount = 1;
return subresource_range;
}
static VkImageSubresourceRange MakeImageFullRange(const VkImageCreateInfo &create_info) {
const auto format = create_info.format;
VkImageSubresourceRange init_range{0, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS};
if (FormatIsColor(format) || FormatIsMultiplane(format)) {
init_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Normalization will expand this for multiplane
} else {
init_range.aspectMask =
(FormatHasDepth(format) ? VK_IMAGE_ASPECT_DEPTH_BIT : 0) | (FormatHasStencil(format) ? VK_IMAGE_ASPECT_STENCIL_BIT : 0);
}
return NormalizeSubresourceRange(create_info, init_range);
}
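// Worked example (illustrative): for VK_FORMAT_R8G8B8A8_UNORM the initial aspect
// mask is VK_IMAGE_ASPECT_COLOR_BIT; for VK_FORMAT_D24_UNORM_S8_UINT it becomes
// VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, after which
// NormalizeSubresourceRange resolves the VK_REMAINING_* counts.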
std::vector<VkImageView> FRAMEBUFFER_STATE::GetUsedAttachments(
const safe_VkSubpassDescription2 &subpasses, const std::vector<IMAGE_VIEW_STATE *> &imagelessFramebufferAttachments) {
std::vector<VkImageView> attachment_views(createInfo.attachmentCount, VK_NULL_HANDLE);
const bool imageless = (createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) ? true : false;
for (uint32_t index = 0; index < subpasses.inputAttachmentCount; ++index) {
const uint32_t attachment_index = subpasses.pInputAttachments[index].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
if (imageless) {
attachment_views[attachment_index] = imagelessFramebufferAttachments[attachment_index]->image_view;
} else {
attachment_views[attachment_index] = createInfo.pAttachments[attachment_index];
}
}
}
for (uint32_t index = 0; index < subpasses.colorAttachmentCount; ++index) {
const uint32_t attachment_index = subpasses.pColorAttachments[index].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
if (imageless) {
attachment_views[attachment_index] = imagelessFramebufferAttachments[attachment_index]->image_view;
} else {
attachment_views[attachment_index] = createInfo.pAttachments[attachment_index];
}
}
if (subpasses.pResolveAttachments) {
const uint32_t attachment_index2 = subpasses.pResolveAttachments[index].attachment;
if (attachment_index2 != VK_ATTACHMENT_UNUSED) {
if (imageless) {
attachment_views[attachment_index2] = imagelessFramebufferAttachments[attachment_index2]->image_view;
} else {
attachment_views[attachment_index2] = createInfo.pAttachments[attachment_index2];
}
}
}
}
if (subpasses.pDepthStencilAttachment) {
const uint32_t attachment_index = subpasses.pDepthStencilAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
if (imageless) {
attachment_views[attachment_index] = imagelessFramebufferAttachments[attachment_index]->image_view;
} else {
attachment_views[attachment_index] = createInfo.pAttachments[attachment_index];
}
}
}
return attachment_views;
}
IMAGE_STATE::IMAGE_STATE(VkDevice dev, VkImage img, const VkImageCreateInfo *pCreateInfo)
: image(img),
safe_create_info(pCreateInfo),
createInfo(*safe_create_info.ptr()),
valid(false),
acquired(false),
shared_presentable(false),
layout_locked(false),
get_sparse_reqs_called(false),
sparse_metadata_required(false),
sparse_metadata_bound(false),
has_ahb_format(false),
is_swapchain_image(false),
ahb_format(0),
full_range{MakeImageFullRange(createInfo)},
create_from_swapchain(VK_NULL_HANDLE),
bind_swapchain(VK_NULL_HANDLE),
bind_swapchain_imageIndex(0),
range_encoder(full_range),
disjoint(false),
plane0_memory_requirements_checked(false),
plane1_memory_requirements_checked(false),
plane2_memory_requirements_checked(false),
subresource_encoder(full_range),
fragment_encoder(nullptr),
store_device_as_workaround(dev), // TODO REMOVE WHEN encoder can be const
sparse_requirements{} {
if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
uint32_t *pQueueFamilyIndices = new uint32_t[createInfo.queueFamilyIndexCount];
for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) {
pQueueFamilyIndices[i] = pCreateInfo->pQueueFamilyIndices[i];
}
createInfo.pQueueFamilyIndices = pQueueFamilyIndices;
}
if (createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
sparse = true;
}
auto *externalMemoryInfo = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(pCreateInfo->pNext);
if (externalMemoryInfo) {
external_memory_handle = externalMemoryInfo->handleTypes;
}
}
bool IMAGE_STATE::IsCreateInfoEqual(const VkImageCreateInfo &other_createInfo) const {
bool is_equal = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags);
is_equal = is_equal && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo);
is_equal = is_equal && IsMipLevelsEqual(other_createInfo) && IsArrayLayersEqual(other_createInfo);
is_equal = is_equal && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo);
is_equal = is_equal && IsExtentEqual(other_createInfo) && IsTilingEqual(other_createInfo);
is_equal = is_equal && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo);
return is_equal &&
((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) ? IsQueueFamilyIndicesEqual(other_createInfo) : true);
}
// Check image compatibility rules for VK_NV_dedicated_allocation_image_aliasing
bool IMAGE_STATE::IsCreateInfoDedicatedAllocationImageAliasingCompatible(const VkImageCreateInfo &other_createInfo) const {
bool is_compatible = (createInfo.sType == other_createInfo.sType) && (createInfo.flags == other_createInfo.flags);
is_compatible = is_compatible && IsImageTypeEqual(other_createInfo) && IsFormatEqual(other_createInfo);
is_compatible = is_compatible && IsMipLevelsEqual(other_createInfo);
is_compatible = is_compatible && IsUsageEqual(other_createInfo) && IsInitialLayoutEqual(other_createInfo);
is_compatible = is_compatible && IsSamplesEqual(other_createInfo) && IsSharingModeEqual(other_createInfo);
is_compatible = is_compatible &&
((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) ? IsQueueFamilyIndicesEqual(other_createInfo) : true);
is_compatible = is_compatible && IsTilingEqual(other_createInfo);
is_compatible = is_compatible && createInfo.extent.width <= other_createInfo.extent.width &&
createInfo.extent.height <= other_createInfo.extent.height &&
createInfo.extent.depth <= other_createInfo.extent.depth &&
createInfo.arrayLayers <= other_createInfo.arrayLayers;
return is_compatible;
}
bool IMAGE_STATE::IsCompatibleAliasing(IMAGE_STATE *other_image_state) {
if (!is_swapchain_image && !other_image_state->is_swapchain_image &&
!(createInfo.flags & other_image_state->createInfo.flags & VK_IMAGE_CREATE_ALIAS_BIT))
return false;
if ((create_from_swapchain == VK_NULL_HANDLE) && binding.mem_state &&
(binding.mem_state == other_image_state->binding.mem_state) && (binding.offset == other_image_state->binding.offset) &&
IsCreateInfoEqual(other_image_state->createInfo)) {
return true;
}
if ((bind_swapchain == other_image_state->bind_swapchain) && (bind_swapchain != VK_NULL_HANDLE)) {
return true;
}
return false;
}
IMAGE_VIEW_STATE::IMAGE_VIEW_STATE(const std::shared_ptr<IMAGE_STATE> &im, VkImageView iv, const VkImageViewCreateInfo *ci)
: image_view(iv),
create_info(*ci),
normalized_subresource_range(NormalizeSubresourceRange(*im, ci->subresourceRange)),
range_generator(im->subresource_encoder, normalized_subresource_range),
samplerConversion(VK_NULL_HANDLE),
image_state(im) {
auto *conversionInfo = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info.pNext);
if (conversionInfo) samplerConversion = conversionInfo->conversion;
if (image_state) {
// A light normalization of the createInfo range
auto &sub_res_range = create_info.subresourceRange;
sub_res_range.levelCount = ResolveRemainingLevels(&sub_res_range, image_state->createInfo.mipLevels);
sub_res_range.layerCount = ResolveRemainingLayers(&sub_res_range, image_state->createInfo.arrayLayers);
// Cache a full normalization (for "full image/whole image" comparisons)
// normalized_subresource_range = NormalizeSubresourceRange(*image_state, ci->subresourceRange);
samples = image_state->createInfo.samples;
descriptor_format_bits = DescriptorRequirementsBitsFromFormat(create_info.format);
}
}
bool IMAGE_VIEW_STATE::OverlapSubresource(const IMAGE_VIEW_STATE &compare_view) const {
if (image_view == compare_view.image_view) {
return true;
}
if (image_state->image != compare_view.image_state->image) {
return false;
}
if (normalized_subresource_range.aspectMask != compare_view.normalized_subresource_range.aspectMask) {
return false;
}
// compare if overlap mip level
if ((normalized_subresource_range.baseMipLevel < compare_view.normalized_subresource_range.baseMipLevel) &&
((normalized_subresource_range.baseMipLevel + normalized_subresource_range.levelCount) <=
compare_view.normalized_subresource_range.baseMipLevel)) {
return false;
}
if ((normalized_subresource_range.baseMipLevel > compare_view.normalized_subresource_range.baseMipLevel) &&
(normalized_subresource_range.baseMipLevel >=
(compare_view.normalized_subresource_range.baseMipLevel + compare_view.normalized_subresource_range.levelCount))) {
return false;
}
// compare if overlap array layer
if ((normalized_subresource_range.baseArrayLayer < compare_view.normalized_subresource_range.baseArrayLayer) &&
((normalized_subresource_range.baseArrayLayer + normalized_subresource_range.layerCount) <=
compare_view.normalized_subresource_range.baseArrayLayer)) {
return false;
}
if ((normalized_subresource_range.baseArrayLayer > compare_view.normalized_subresource_range.baseArrayLayer) &&
(normalized_subresource_range.baseArrayLayer >=
(compare_view.normalized_subresource_range.baseArrayLayer + compare_view.normalized_subresource_range.layerCount))) {
return false;
}
return true;
}
uint32_t FullMipChainLevels(uint32_t height, uint32_t width, uint32_t depth) {
// uint cast applies floor()
return 1u + (uint32_t)log2(std::max({height, width, depth}));
}
uint32_t FullMipChainLevels(VkExtent3D extent) { return FullMipChainLevels(extent.height, extent.width, extent.depth); }
uint32_t FullMipChainLevels(VkExtent2D extent) { return FullMipChainLevels(extent.height, extent.width); }
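// Worked example: FullMipChainLevels(1024, 1024, 1) = 1 + (uint32_t)log2(1024) = 11,
// while a 1x1x1 image yields 1 + log2(1) = 1 mip level.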
bool CoreChecks::FindLayouts(VkImage image, std::vector<VkImageLayout> &layouts) const {
auto image_state = GetImageState(image);
if (!image_state) return false;
const auto *layout_range_map = GetLayoutRangeMap(imageLayoutMap, image);
if (!layout_range_map) return false;
// TODO: FindLayouts function should mutate into a ValidatePresentableLayout with the loop wrapping the LogError
// from the caller. You can then use decode to add the subresource of the range::begin to the error message.
// TODO: what is this test and what is it supposed to do?! -- the logic doesn't match the comment below?!
// TODO: Make this robust for >1 aspect mask. Now it will just say ignore potential errors in this case.
if (layout_range_map->size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
return false;
}
for (auto entry : *layout_range_map) {
layouts.push_back(entry.second);
}
return true;
}
// Set image layout for given VkImageSubresourceRange struct
void CoreChecks::SetImageLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &image_subresource_range, VkImageLayout layout,
VkImageLayout expected_layout) {
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state);
assert(subresource_map); // the non-const getter must return a valid pointer
if (subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout)) {
cb_node->image_layout_change_count++; // Change the version of this data to force revalidation
}
for (const auto &image : image_state.aliasing_images) {
auto alias_state = GetImageState(image);
// The map state of the aliases should all be in sync, so no need to check the return value
subresource_map = GetImageSubresourceLayoutMap(cb_node, *alias_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout);
}
}
// Set the initial image layout for all slices of an image view
void CoreChecks::SetImageViewInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout) {
if (disabled[image_layout_validation]) {
return;
}
IMAGE_STATE *image_state = view_state.image_state.get();
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, *image_state);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, layout, view_state);
for (const auto &image : image_state->aliasing_images) {
image_state = GetImageState(image);
subresource_map = GetImageSubresourceLayoutMap(cb_node, *image_state);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, layout, view_state);
}
}
// Set the initial image layout for a passed non-normalized subresource range
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &range, VkImageLayout layout) {
auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, NormalizeSubresourceRange(image_state, range), layout);
for (const auto &image : image_state.aliasing_images) {
auto alias_state = GetImageState(image);
subresource_map = GetImageSubresourceLayoutMap(cb_node, *alias_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, NormalizeSubresourceRange(*alias_state, range), layout);
}
}
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, VkImage image, const VkImageSubresourceRange &range,
VkImageLayout layout) {
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) return;
SetImageInitialLayout(cb_node, *image_state, range, layout);
};
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceLayers &layers, VkImageLayout layout) {
SetImageInitialLayout(cb_node, image_state, RangeFromLayers(layers), layout);
}
// Set image layout for all slices of an image view
void CoreChecks::SetImageViewLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout,
VkImageLayout layoutStencil) {
IMAGE_STATE *image_state = view_state.image_state.get();
VkImageSubresourceRange sub_range = view_state.normalized_subresource_range;
    // When changing the layout of a 3D image subresource via a 2D or 2D_ARRAY image view, all depth slices of
// the subresource mip level(s) are transitioned, ignoring any layers restriction in the subresource info.
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) && (view_state.create_info.viewType != VK_IMAGE_VIEW_TYPE_3D)) {
sub_range.baseArrayLayer = 0;
sub_range.layerCount = image_state->createInfo.extent.depth;
}
if (sub_range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) && layoutStencil != kInvalidLayout) {
sub_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
SetImageLayout(cb_node, *image_state, sub_range, layout);
sub_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
SetImageLayout(cb_node, *image_state, sub_range, layoutStencil);
} else {
SetImageLayout(cb_node, *image_state, sub_range, layout);
}
}
bool CoreChecks::ValidateRenderPassLayoutAgainstFramebufferImageUsage(RenderPassCreateVersion rp_version, VkImageLayout layout,
VkImage image, VkImageView image_view,
VkFramebuffer framebuffer, VkRenderPass renderpass,
uint32_t attachment_index, const char *variable_name) const {
bool skip = false;
auto image_state = GetImageState(image);
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *function_name = use_rp2 ? "vkCmdBeginRenderPass2()" : "vkCmdBeginRenderPass()";
if (!image_state) {
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |=
LogError(image, "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"%s: RenderPass %s uses %s where pAttachments[%" PRIu32 "] = %s, which refers to an invalid image",
function_name, report_data->FormatHandle(renderpass).c_str(), report_data->FormatHandle(framebuffer).c_str(),
attachment_index, report_data->FormatHandle(image_view).c_str());
return skip;
}
auto image_usage = image_state->createInfo.usage;
const auto stencil_usage_info = lvl_find_in_chain<VkImageStencilUsageCreateInfo>(image_state->createInfo.pNext);
if (stencil_usage_info) {
image_usage |= stencil_usage_info->stencilUsage;
}
// Check for layouts that mismatch image usages in the framebuffer
if (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03094" : "VUID-vkCmdBeginRenderPass-initialLayout-00895";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
!(image_usage & (VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03097" : "VUID-vkCmdBeginRenderPass-initialLayout-00897";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT or VK_IMAGE_USAGE_SAMPLED_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03098" : "VUID-vkCmdBeginRenderPass-initialLayout-00898";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_SRC_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03099" : "VUID-vkCmdBeginRenderPass-initialLayout-00899";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_DST_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (device_extensions.vk_khr_maintenance2) {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03096" : "VUID-vkCmdBeginRenderPass-initialLayout-01758";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
} else {
// The create render pass 2 extension requires maintenance 2 (the previous branch), so no vuid switch needed here.
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, "VUID-vkCmdBeginRenderPass-initialLayout-00896",
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
}
return skip;
}
bool CoreChecks::VerifyFramebufferAndRenderPassLayouts(RenderPassCreateVersion rp_version, const CMD_BUFFER_STATE *pCB,
const VkRenderPassBeginInfo *pRenderPassBegin,
const FRAMEBUFFER_STATE *framebuffer_state) const {
bool skip = false;
auto const pRenderPassInfo = GetRenderPassState(pRenderPassBegin->renderPass)->createInfo.ptr();
auto const &framebufferInfo = framebuffer_state->createInfo;
const VkImageView *attachments = framebufferInfo.pAttachments;
auto render_pass = GetRenderPassState(pRenderPassBegin->renderPass)->renderPass;
auto framebuffer = framebuffer_state->framebuffer;
if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using a framebuffer with a different number of attachments.");
}
const auto *attachmentInfo = lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pRenderPassBegin->pNext);
if (((framebufferInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) != 0) && attachmentInfo != nullptr) {
attachments = attachmentInfo->pAttachments;
}
if (attachments != nullptr) {
const auto *const_pCB = static_cast<const CMD_BUFFER_STATE *>(pCB);
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
auto image_view = attachments[i];
auto view_state = GetImageViewState(image_view);
if (!view_state) {
LogObjectList objlist(pRenderPassBegin->renderPass);
objlist.add(framebuffer_state->framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s is not a valid VkImageView handle",
report_data->FormatHandle(framebuffer_state->framebuffer).c_str(), i,
report_data->FormatHandle(image_view).c_str());
continue;
}
const VkImage image = view_state->create_info.image;
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) {
LogObjectList objlist(pRenderPassBegin->renderPass);
objlist.add(framebuffer_state->framebuffer);
objlist.add(image_view);
objlist.add(image);
skip |= LogError(objlist, "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s references non-extant %s.",
report_data->FormatHandle(framebuffer_state->framebuffer).c_str(), i,
report_data->FormatHandle(image_view).c_str(), report_data->FormatHandle(image).c_str());
continue;
}
auto attachment_initial_layout = pRenderPassInfo->pAttachments[i].initialLayout;
auto final_layout = pRenderPassInfo->pAttachments[i].finalLayout;
// Default to expecting stencil in the same layout.
auto attachment_stencil_initial_layout = attachment_initial_layout;
// If a separate layout is specified, look for that.
const auto *attachment_description_stencil_layout =
lvl_find_in_chain<VkAttachmentDescriptionStencilLayoutKHR>(pRenderPassInfo->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
attachment_stencil_initial_layout = attachment_description_stencil_layout->stencilInitialLayout;
}
// Cast pCB to const because we don't want to create entries that don't exist here (in case the key changes to something
// in common with the non-const version.)
const ImageSubresourceLayoutMap *subresource_map =
(attachment_initial_layout != VK_IMAGE_LAYOUT_UNDEFINED) ? GetImageSubresourceLayoutMap(const_pCB, image) : nullptr;
if (subresource_map) { // If no layout information for image yet, will be checked at QueueSubmit time
LayoutUseCheckAndMessage layout_check(subresource_map);
bool subres_skip = false;
auto pos = subresource_map->Find(view_state->normalized_subresource_range);
for (; pos != subresource_map->End() && !subres_skip; ++pos) {
const VkImageSubresource &subres = pos->subresource;
// Allow for differing depth and stencil layouts
VkImageLayout check_layout = attachment_initial_layout;
if (subres.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) check_layout = attachment_stencil_initial_layout;
if (!layout_check.Check(subres, check_layout, pos->current_layout, pos->initial_layout)) {
subres_skip |= LogError(
device, kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using attachment %u where the render pass initial layout is %s "
"and the %s layout of the attachment is %s. The layouts must match, or the render "
"pass initial layout for the attachment must be VK_IMAGE_LAYOUT_UNDEFINED",
i, string_VkImageLayout(check_layout), layout_check.message, string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_initial_layout, image, image_view,
framebuffer, render_pass, i, "initial layout");
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, final_layout, image, image_view, framebuffer,
render_pass, i, "final layout");
}
for (uint32_t j = 0; j < pRenderPassInfo->subpassCount; ++j) {
auto &subpass = pRenderPassInfo->pSubpasses[j];
for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].inputAttachmentCount; ++k) {
auto &attachment_ref = subpass.pInputAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"input attachment layout");
}
}
}
for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].colorAttachmentCount; ++k) {
auto &attachment_ref = subpass.pColorAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"color attachment layout");
if (subpass.pResolveAttachments) {
ValidateRenderPassLayoutAgainstFramebufferImageUsage(
rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass,
attachment_ref.attachment, "resolve attachment layout");
}
}
}
}
if (pRenderPassInfo->pSubpasses[j].pDepthStencilAttachment) {
auto &attachment_ref = *subpass.pDepthStencilAttachment;
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"input attachment layout");
}
}
}
}
}
return skip;
}
void CoreChecks::TransitionAttachmentRefLayout(CMD_BUFFER_STATE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
const safe_VkAttachmentReference2 &ref) {
if (ref.attachment != VK_ATTACHMENT_UNUSED) {
IMAGE_VIEW_STATE *image_view = nullptr;
if (pFramebuffer->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) {
const auto attachment_info =
lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pCB->activeRenderPassBeginInfo.pNext);
if (attachment_info) image_view = GetImageViewState(attachment_info->pAttachments[ref.attachment]);
} else {
image_view = GetAttachmentImageViewState(pCB, pFramebuffer, ref.attachment);
}
if (image_view) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_reference_stencil_layout = lvl_find_in_chain<VkAttachmentReferenceStencilLayoutKHR>(ref.pNext);
if (attachment_reference_stencil_layout) {
stencil_layout = attachment_reference_stencil_layout->stencilLayout;
}
SetImageViewLayout(pCB, *image_view, ref.layout, stencil_layout);
}
}
}
void CoreChecks::TransitionSubpassLayouts(CMD_BUFFER_STATE *pCB, const RENDER_PASS_STATE *render_pass_state,
const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) {
assert(render_pass_state);
if (framebuffer_state) {
auto const &subpass = render_pass_state->createInfo.pSubpasses[subpass_index];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pInputAttachments[j]);
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pColorAttachments[j]);
}
if (subpass.pDepthStencilAttachment) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, *subpass.pDepthStencilAttachment);
}
}
}
// Transition the layout state for renderpass attachments based on the BeginRenderPass() call. This includes:
// 1. Transition into initialLayout state
// 2. Transition from initialLayout to layout used in subpass 0
void CoreChecks::TransitionBeginRenderPassLayouts(CMD_BUFFER_STATE *cb_state, const RENDER_PASS_STATE *render_pass_state,
FRAMEBUFFER_STATE *framebuffer_state) {
// First transition into initialLayout
auto const rpci = render_pass_state->createInfo.ptr();
for (uint32_t i = 0; i < rpci->attachmentCount; ++i) {
IMAGE_VIEW_STATE *view_state = nullptr;
if (framebuffer_state->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) {
const auto attachment_info =
lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(cb_state->activeRenderPassBeginInfo.pNext);
if (attachment_info) view_state = GetImageViewState(attachment_info->pAttachments[i]);
} else {
view_state = GetAttachmentImageViewState(cb_state, framebuffer_state, i);
}
if (view_state) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_description_stencil_layout =
lvl_find_in_chain<VkAttachmentDescriptionStencilLayoutKHR>(rpci->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
stencil_layout = attachment_description_stencil_layout->stencilInitialLayout;
}
SetImageViewLayout(cb_state, *view_state, rpci->pAttachments[i].initialLayout, stencil_layout);
}
}
// Now transition for first subpass (index 0)
TransitionSubpassLayouts(cb_state, render_pass_state, 0, framebuffer_state);
}
bool VerifyAspectsPresent(VkImageAspectFlags aspect_mask, VkFormat format) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != 0) {
if (!(FormatIsColor(format) || FormatIsMultiplane(format))) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) {
if (!FormatHasDepth(format)) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) {
if (!FormatHasStencil(format)) return false;
}
if (0 !=
(aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR))) {
if (FormatPlaneCount(format) == 1) return false;
}
return true;
}
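// Worked example (illustrative): VerifyAspectsPresent(VK_IMAGE_ASPECT_COLOR_BIT,
// VK_FORMAT_D16_UNORM) is false, since a depth-only format has no color aspect;
// VerifyAspectsPresent(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
// VK_FORMAT_D24_UNORM_S8_UINT) is true.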
// Verify an ImageMemoryBarrier's old/new ImageLayouts are compatible with the Image's ImageUsageFlags.
bool CoreChecks::ValidateBarrierLayoutToImageUsage(const VkImageMemoryBarrier &img_barrier, bool new_not_old,
VkImageUsageFlags usage_flags, const char *func_name,
const char *barrier_pname) const {
bool skip = false;
const VkImageLayout layout = (new_not_old) ? img_barrier.newLayout : img_barrier.oldLayout;
const char *msg_code = kVUIDUndefined; // sentinel value meaning "no error"
switch (layout) {
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01208";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01209";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01210";
}
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
if ((usage_flags & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01211";
}
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01212";
}
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01213";
}
break;
case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV:
if ((usage_flags & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-02088";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01658";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01659";
}
break;
default:
// Other VkImageLayout values do not have VUs defined in this context.
break;
}
if (msg_code != kVUIDUndefined) {
skip |= LogError(img_barrier.image, msg_code,
"%s: Image barrier %s %s Layout=%s is not compatible with %s usage flags 0x%" PRIx32 ".", func_name,
barrier_pname, ((new_not_old) ? "new" : "old"), string_VkImageLayout(layout),
report_data->FormatHandle(img_barrier.image).c_str(), usage_flags);
}
return skip;
}
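// Worked example (illustrative): a barrier whose newLayout is
// VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL on an image created without
// VK_IMAGE_USAGE_TRANSFER_DST_BIT fails with
// "VUID-VkImageMemoryBarrier-oldLayout-01213" via the switch above.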
// Verify image barriers are compatible with the images they reference.
bool CoreChecks::ValidateBarriersToImages(const CMD_BUFFER_STATE *cb_state, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers, const char *func_name) const {
bool skip = false;
// Scoreboard for checking for duplicate and inconsistent barriers to images
struct ImageBarrierScoreboardEntry {
uint32_t index;
// This is designed for temporary storage within the scope of the API call. If retained storage of the barriers is
        // required, copies should be made and smart or unique pointers used in some other structure (or this one refactored)
const VkImageMemoryBarrier *barrier;
};
using ImageBarrierScoreboardSubresMap = std::unordered_map<VkImageSubresourceRange, ImageBarrierScoreboardEntry>;
using ImageBarrierScoreboardImageMap = std::unordered_map<VkImage, ImageBarrierScoreboardSubresMap>;
// Scoreboard for duplicate layout transition barriers within the list
// Pointers retained in the scoreboard only have the lifetime of *this* call (i.e. within the scope of the API call)
ImageBarrierScoreboardImageMap layout_transitions;
for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) {
const auto &img_barrier = pImageMemoryBarriers[i];
const std::string barrier_pname = "pImageMemoryBarrier[" + std::to_string(i) + "]";
// Update the scoreboard of layout transitions and check for barriers affecting the same image and subresource
// TODO: a higher precision could be gained by adapting the command_buffer image_layout_map logic looking for conflicts
// at a per sub-resource level
if (img_barrier.oldLayout != img_barrier.newLayout) {
const ImageBarrierScoreboardEntry new_entry{i, &img_barrier};
const auto image_it = layout_transitions.find(img_barrier.image);
if (image_it != layout_transitions.end()) {
auto &subres_map = image_it->second;
auto subres_it = subres_map.find(img_barrier.subresourceRange);
if (subres_it != subres_map.end()) {
auto &entry = subres_it->second;
if ((entry.barrier->newLayout != img_barrier.oldLayout) &&
(img_barrier.oldLayout != VK_IMAGE_LAYOUT_UNDEFINED)) {
const VkImageSubresourceRange &range = img_barrier.subresourceRange;
skip = LogError(
cb_state->commandBuffer, "VUID-VkImageMemoryBarrier-oldLayout-01197",
"%s: %s conflicts with earlier entry pImageMemoryBarrier[%u]. %s"
" subresourceRange: aspectMask=%u baseMipLevel=%u levelCount=%u, baseArrayLayer=%u, layerCount=%u; "
"conflicting barrier transitions image layout from %s when earlier barrier transitioned to layout %s.",
func_name, barrier_pname.c_str(), entry.index, report_data->FormatHandle(img_barrier.image).c_str(),
range.aspectMask, range.baseMipLevel, range.levelCount, range.baseArrayLayer, range.layerCount,
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(entry.barrier->newLayout));
}
entry = new_entry;
} else {
subres_map[img_barrier.subresourceRange] = new_entry;
}
} else {
layout_transitions[img_barrier.image][img_barrier.subresourceRange] = new_entry;
}
}
auto image_state = GetImageState(img_barrier.image);
if (image_state) {
VkImageUsageFlags usage_flags = image_state->createInfo.usage;
skip |= ValidateBarrierLayoutToImageUsage(img_barrier, false, usage_flags, func_name, barrier_pname.c_str());
skip |= ValidateBarrierLayoutToImageUsage(img_barrier, true, usage_flags, func_name, barrier_pname.c_str());
// Make sure layout is able to be transitioned, currently only presented shared presentable images are locked
if (image_state->layout_locked) {
// TODO: Add unique id for error when available
skip |= LogError(
img_barrier.image, 0,
"%s: Attempting to transition shared presentable %s"
" from layout %s to layout %s, but image has already been presented and cannot have its layout transitioned.",
func_name, report_data->FormatHandle(img_barrier.image).c_str(), string_VkImageLayout(img_barrier.oldLayout),
string_VkImageLayout(img_barrier.newLayout));
}
const VkImageCreateInfo &image_create_info = image_state->createInfo;
const VkFormat image_format = image_create_info.format;
const VkImageAspectFlags aspect_mask = img_barrier.subresourceRange.aspectMask;
// For a Depth/Stencil image both aspects MUST be set
if (FormatIsDepthAndStencil(image_format)) {
if (enabled_features.core12.separateDepthStencilLayouts) {
if (!(aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
skip |=
LogError(img_barrier.image, "VUID-VkImageMemoryBarrier-image-03319",
"%s: Image barrier %s references %s of format %s that must have either the depth or stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
} else {
auto const ds_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if ((aspect_mask & ds_mask) != (ds_mask)) {
const char *vuid = device_extensions.vk_khr_separate_depth_stencil_layouts
? "VUID-VkImageMemoryBarrier-image-03320"
: "VUID-VkImageMemoryBarrier-image-01207";
skip |= LogError(img_barrier.image, vuid,
"%s: Image barrier %s references %s of format %s that must have the depth and stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
}
}
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_state, img_barrier.image);
if (img_barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
// Not sure if this needs to be in the ForRange traversal, pulling it out as it is currently invariant with
// subresource.
} else if (subresource_map && !QueueFamilyIsExternal(img_barrier.srcQueueFamilyIndex)) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map);
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, img_barrier.subresourceRange);
for (auto pos = subresource_map->Find(normalized_isr); (pos != subresource_map->End()) && !subres_skip; ++pos) {
const auto &value = *pos;
if (!layout_check.Check(value.subresource, img_barrier.oldLayout, value.current_layout, value.initial_layout)) {
subres_skip = LogError(
cb_state->commandBuffer, "VUID-VkImageMemoryBarrier-oldLayout-01197",
"%s: For %s you cannot transition the layout of aspect=%d level=%d layer=%d from %s when the "
"%s layout is %s.",
func_name, report_data->FormatHandle(img_barrier.image).c_str(), value.subresource.aspectMask,
value.subresource.mipLevel, value.subresource.arrayLayer, string_VkImageLayout(img_barrier.oldLayout),
layout_check.message, string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
// checks color format and (single-plane or non-disjoint)
// if ycbcr extension is not supported then single-plane and non-disjoint are always both true
if ((FormatIsColor(image_format) == true) &&
((FormatIsMultiplane(image_format) == false) || (image_state->disjoint == false))) {
if (aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) {
const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion)
? "VUID-VkImageMemoryBarrier-image-01671"
: "VUID-VkImageMemoryBarrier-image-02902";
skip |= LogError(img_barrier.image, vuid,
"%s: Image barrier %s references %s of format %s that must be only VK_IMAGE_ASPECT_COLOR_BIT, "
"but its aspectMask is 0x%" PRIx32 ".",
func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
}
VkImageAspectFlags valid_disjoint_mask =
VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_COLOR_BIT;
if ((FormatIsMultiplane(image_format) == true) && (image_state->disjoint == true) &&
((aspect_mask & valid_disjoint_mask) == 0)) {
skip |= LogError(img_barrier.image, "VUID-VkImageMemoryBarrier-image-01672",
"%s: Image barrier %s references %s of format %s has aspectMask (0x%" PRIx32
") but needs to include either an VK_IMAGE_ASPECT_PLANE_*_BIT or VK_IMAGE_ASPECT_COLOR_BIT.",
func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
if ((FormatPlaneCount(image_format) == 2) && ((aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT) != 0)) {
skip |= LogError(img_barrier.image, "VUID-VkImageMemoryBarrier-image-01673",
"%s: Image barrier %s references %s of format %s has only two planes but included "
"VK_IMAGE_ASPECT_PLANE_2_BIT in its aspectMask (0x%" PRIx32 ").",
func_name, barrier_pname.c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
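            // Illustrative sketch: for a disjoint 2-plane image (e.g. VK_FORMAT_G8_B8R8_2PLANE_420_UNORM),
            // a per-plane barrier would set aspectMask to VK_IMAGE_ASPECT_PLANE_0_BIT or
            // VK_IMAGE_ASPECT_PLANE_1_BIT (or VK_IMAGE_ASPECT_COLOR_BIT for all planes);
            // VK_IMAGE_ASPECT_PLANE_2_BIT would trigger the error above.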
}
}
return skip;
}
bool CoreChecks::IsReleaseOp(CMD_BUFFER_STATE *cb_state, const VkImageMemoryBarrier &barrier) const {
if (!IsTransferOp(&barrier)) return false;
auto pool = cb_state->command_pool.get();
return pool && TempIsReleaseOp<VkImageMemoryBarrier, true>(pool, &barrier);
}
template <typename Barrier>
bool CoreChecks::ValidateQFOTransferBarrierUniqueness(const char *func_name, const CMD_BUFFER_STATE *cb_state,
uint32_t barrier_count, const Barrier *barriers) const {
using BarrierRecord = QFOTransferBarrier<Barrier>;
bool skip = false;
auto pool = cb_state->command_pool.get();
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename BarrierRecord::Tag());
const char *barrier_name = BarrierRecord::BarrierName();
const char *handle_name = BarrierRecord::HandleName();
const char *transfer_type = nullptr;
for (uint32_t b = 0; b < barrier_count; b++) {
if (!IsTransferOp(&barriers[b])) continue;
const BarrierRecord *barrier_record = nullptr;
if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer */>(pool, &barriers[b]) &&
!QueueFamilyIsExternal(barriers[b].dstQueueFamilyIndex)) {
const auto found = barrier_sets.release.find(barriers[b]);
if (found != barrier_sets.release.cend()) {
barrier_record = &(*found);
transfer_type = "releasing";
}
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) &&
!QueueFamilyIsExternal(barriers[b].srcQueueFamilyIndex)) {
const auto found = barrier_sets.acquire.find(barriers[b]);
if (found != barrier_sets.acquire.cend()) {
barrier_record = &(*found);
transfer_type = "acquiring";
}
}
if (barrier_record != nullptr) {
skip |= LogWarning(cb_state->commandBuffer, BarrierRecord::ErrMsgDuplicateQFOInCB(),
"%s: %s at index %" PRIu32 " %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier recorded in this command buffer.",
func_name, barrier_name, b, transfer_type, handle_name,
report_data->FormatHandle(barrier_record->handle).c_str(), barrier_record->srcQueueFamilyIndex,
barrier_record->dstQueueFamilyIndex);
}
}
return skip;
}
VulkanTypedHandle BarrierTypedHandle(const VkImageMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage);
}
const IMAGE_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkImageMemoryBarrier &barrier) {
return device_state.GetImageState(barrier.image);
}
VulkanTypedHandle BarrierTypedHandle(const VkBufferMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer);
}
const BUFFER_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkBufferMemoryBarrier &barrier) {
return device_state.GetBufferState(barrier.buffer);
}
VkBuffer BarrierHandle(const VkBufferMemoryBarrier &barrier) { return barrier.buffer; }
template <typename Barrier>
void CoreChecks::RecordBarrierArrayValidationInfo(const char *func_name, CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const Barrier *barriers) {
auto pool = cb_state->command_pool.get();
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename QFOTransferBarrier<Barrier>::Tag());
for (uint32_t b = 0; b < barrier_count; b++) {
auto &barrier = barriers[b];
if (IsTransferOp(&barrier)) {
if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, &barrier) &&
!QueueFamilyIsExternal(barrier.dstQueueFamilyIndex)) {
barrier_sets.release.emplace(barrier);
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barrier) &&
!QueueFamilyIsExternal(barrier.srcQueueFamilyIndex)) {
barrier_sets.acquire.emplace(barrier);
}
}
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
if (!QueueFamilyIsIgnored(src_queue_family) && !QueueFamilyIsIgnored(dst_queue_family)) {
// Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
// TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
auto handle_state = BarrierHandleState(*this, barrier);
bool mode_concurrent = handle_state ? handle_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT : false;
if (!mode_concurrent) {
const auto typed_handle = BarrierTypedHandle(barrier);
cb_state->queue_submit_functions.emplace_back(
[func_name, cb_state, typed_handle, src_queue_family, dst_queue_family](
const ValidationStateTracker *device_data, const QUEUE_STATE *queue_state) {
return ValidateConcurrentBarrierAtSubmit(device_data, queue_state, func_name, cb_state, typed_handle,
src_queue_family, dst_queue_family);
});
}
}
}
}
bool CoreChecks::ValidateBarriersQFOTransferUniqueness(const char *func_name, const CMD_BUFFER_STATE *cb_state,
uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers,
uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) const {
bool skip = false;
skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, bufferBarrierCount, pBufferMemBarriers);
skip |= ValidateQFOTransferBarrierUniqueness(func_name, cb_state, imageMemBarrierCount, pImageMemBarriers);
return skip;
}
void CoreChecks::RecordBarrierValidationInfo(const char *func_name, CMD_BUFFER_STATE *cb_state, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
RecordBarrierArrayValidationInfo(func_name, cb_state, bufferBarrierCount, pBufferMemBarriers);
RecordBarrierArrayValidationInfo(func_name, cb_state, imageMemBarrierCount, pImageMemBarriers);
}
template <typename BarrierRecord, typename Scoreboard>
bool CoreChecks::ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const CMD_BUFFER_STATE *cb_state,
const char *operation, const BarrierRecord &barrier, Scoreboard *scoreboard) const {
// Record to the scoreboard or report that we have a duplication
bool skip = false;
auto inserted = scoreboard->insert(std::make_pair(barrier, cb_state));
if (!inserted.second && inserted.first->second != cb_state) {
        // This is a duplication (but don't report duplicates from the same CB, as we do that at record time)
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(barrier.handle);
objlist.add(inserted.first->second->commandBuffer);
skip = LogWarning(objlist, BarrierRecord::ErrMsgDuplicateQFOInSubmit(),
"%s: %s %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier submitted in this batch from %s.",
"vkQueueSubmit()", BarrierRecord::BarrierName(), operation, BarrierRecord::HandleName(),
report_data->FormatHandle(barrier.handle).c_str(), barrier.srcQueueFamilyIndex,
barrier.dstQueueFamilyIndex, report_data->FormatHandle(inserted.first->second->commandBuffer).c_str());
}
return skip;
}
template <typename Barrier>
bool CoreChecks::ValidateQueuedQFOTransferBarriers(const CMD_BUFFER_STATE *cb_state,
QFOTransferCBScoreboards<Barrier> *scoreboards) const {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
bool skip = false;
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
const GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag());
const char *barrier_name = BarrierRecord::BarrierName();
const char *handle_name = BarrierRecord::HandleName();
// No release should have an extant duplicate (WARNING)
for (const auto &release : cb_barriers.release) {
// Check the global pending release barriers
const auto set_it = global_release_barriers.find(release.handle);
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
const auto found = set_for_handle.find(release);
if (found != set_for_handle.cend()) {
skip |= LogWarning(cb_state->commandBuffer, BarrierRecord::ErrMsgDuplicateQFOSubmitted(),
"%s: %s releasing queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier queued for execution, without intervening acquire operation.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(found->handle).c_str(),
found->srcQueueFamilyIndex, found->dstQueueFamilyIndex);
}
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "releasing", release, &scoreboards->release);
}
// Each acquire must have a matching release (ERROR)
for (const auto &acquire : cb_barriers.acquire) {
const auto set_it = global_release_barriers.find(acquire.handle);
bool matching_release_found = false;
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
matching_release_found = set_for_handle.find(acquire) != set_for_handle.cend();
}
if (!matching_release_found) {
skip |= LogError(cb_state->commandBuffer, BarrierRecord::ErrMsgMissingQFOReleaseInSubmit(),
"%s: in submitted command buffer %s acquiring ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " has no matching release barrier queued for execution.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(acquire.handle).c_str(),
acquire.srcQueueFamilyIndex, acquire.dstQueueFamilyIndex);
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "acquiring", acquire, &scoreboards->acquire);
}
return skip;
}
bool CoreChecks::ValidateQueuedQFOTransfers(const CMD_BUFFER_STATE *cb_state,
QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) const {
bool skip = false;
skip |= ValidateQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state, qfo_image_scoreboards);
skip |= ValidateQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state, qfo_buffer_scoreboards);
return skip;
}
template <typename Barrier>
void CoreChecks::RecordQueuedQFOTransferBarriers(CMD_BUFFER_STATE *cb_state) {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(TypeTag());
// Add release barriers from this submit to the global map
for (const auto &release : cb_barriers.release) {
// the global barrier list is mapped by resource handle to allow cleanup on resource destruction
// NOTE: We're using [] because creation of a Set is a needed side effect for new handles
global_release_barriers[release.handle].insert(release);
}
// Erase acquired barriers from this submit from the global map -- essentially marking releases as consumed
for (const auto &acquire : cb_barriers.acquire) {
// NOTE: We're not using [] because we don't want to create entries for missing releases
auto set_it = global_release_barriers.find(acquire.handle);
if (set_it != global_release_barriers.end()) {
QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
set_for_handle.erase(acquire);
if (set_for_handle.size() == 0) { // Clean up empty sets
global_release_barriers.erase(set_it);
}
}
}
}
void CoreChecks::RecordQueuedQFOTransfers(CMD_BUFFER_STATE *cb_state) {
RecordQueuedQFOTransferBarriers<VkImageMemoryBarrier>(cb_state);
RecordQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(cb_state);
}
// Avoid making the template globally visible by exporting the one instance of it we need.
void CoreChecks::EraseQFOImageRelaseBarriers(const VkImage &image) { EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image); }
void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t memBarrierCount,
const VkImageMemoryBarrier *pImgMemBarriers) {
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pImgMemBarriers[i];
// For ownership transfers, the barrier is specified twice; as a release
// operation on the yielding queue family, and as an acquire operation
// on the acquiring queue family. This barrier may also include a layout
// transition, which occurs 'between' the two operations. For validation
// purposes it doesn't seem important which side performs the layout
// transition, but it must not be performed twice. We'll arbitrarily
// choose to perform it as part of the acquire operation.
//
// However, we still need to record initial layout for the "initial layout" validation
const bool is_release_op = IsReleaseOp(cb_state, mem_barrier);
auto *image_state = GetImageState(mem_barrier.image);
if (!image_state) continue;
RecordTransitionImageLayout(cb_state, image_state, mem_barrier, is_release_op);
}
}
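// Illustrative sketch (hypothetical handles and queue family indices): an ownership transfer is the
// same VkImageMemoryBarrier recorded twice -- once as a release on the source family's queue and once
// as an acquire on the destination family's queue; the layout transition is applied only once.
// Access masks and submission-order synchronization (e.g. a semaphore between the submits) are elided.
//
//   VkImageMemoryBarrier qfo = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//   qfo.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//   qfo.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//   qfo.srcQueueFamilyIndex = transfer_family;   // hypothetical
//   qfo.dstQueueFamilyIndex = graphics_family;   // hypothetical
//   qfo.image = image;
//   qfo.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//   // Release, recorded on a command buffer of the transfer family:
//   vkCmdPipelineBarrier(transfer_cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
//                        0, 0, nullptr, 0, nullptr, 1, &qfo);
//   // Acquire, the identical barrier recorded on a command buffer of the graphics family:
//   vkCmdPipelineBarrier(graphics_cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                        0, 0, nullptr, 0, nullptr, 1, &qfo);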
void CoreChecks::RecordTransitionImageLayout(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state,
const VkImageMemoryBarrier &mem_barrier, bool is_release_op) {
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, mem_barrier.subresourceRange);
const auto &image_create_info = image_state->createInfo;
// Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR flag bit, where <extent.depth> and
// <arrayLayers> can potentially alias. When recording layout for the entire image, pre-emptively record layouts
// for all (potential) layer sub_resources.
if (0 != (image_create_info.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) {
normalized_isr.baseArrayLayer = 0;
normalized_isr.layerCount = image_create_info.extent.depth; // Treat each depth slice as a layer subresource
}
VkImageLayout initial_layout = mem_barrier.oldLayout;
// Layout transitions in external instance are not tracked, so don't validate initial layout.
if (QueueFamilyIsExternal(mem_barrier.srcQueueFamilyIndex)) {
initial_layout = VK_IMAGE_LAYOUT_UNDEFINED;
}
if (is_release_op) {
SetImageInitialLayout(cb_state, *image_state, normalized_isr, mem_barrier.oldLayout);
} else {
SetImageLayout(cb_state, *image_state, normalized_isr, mem_barrier.newLayout, initial_layout);
}
}
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageAspectFlags aspect_mask,
VkImageLayout explicit_layout, VkImageLayout optimal_layout, const char *caller,
const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code, bool *error) const {
if (disabled[image_layout_validation]) return false;
assert(cb_node);
assert(image_state);
const auto image = image_state->image;
bool skip = false;
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image);
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map, aspect_mask);
for (auto pos = subresource_map->Find(range); (pos != subresource_map->End()) && !subres_skip; ++pos) {
if (!layout_check.Check(pos->subresource, explicit_layout, pos->current_layout, pos->initial_layout)) {
*error = true;
subres_skip |= LogError(cb_node->commandBuffer, layout_mismatch_msg_code,
"%s: Cannot use %s (layer=%u mip=%u) with specific layout %s that doesn't match the "
"%s layout %s.",
caller, report_data->FormatHandle(image).c_str(), pos->subresource.arrayLayer,
pos->subresource.mipLevel, string_VkImageLayout(explicit_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
// If optimal_layout is not UNDEFINED, check that layout matches optimal for this case
if ((VK_IMAGE_LAYOUT_UNDEFINED != optimal_layout) && (explicit_layout != optimal_layout)) {
if (VK_IMAGE_LAYOUT_GENERAL == explicit_layout) {
if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
skip |= LogPerformanceWarning(cb_node->commandBuffer, kVUID_Core_DrawState_InvalidImageLayout,
"%s: For optimal performance %s layout should be %s instead of GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(optimal_layout));
}
} else if (device_extensions.vk_khr_shared_presentable_image) {
if (image_state->shared_presentable) {
if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != explicit_layout) {
skip |=
LogError(device, layout_invalid_msg_code,
"%s: Layout for shared presentable image is %s but must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
                                 caller, string_VkImageLayout(explicit_layout));
}
}
} else {
*error = true;
skip |= LogError(cb_node->commandBuffer, layout_invalid_msg_code,
"%s: Layout for %s is %s but can only be %s or VK_IMAGE_LAYOUT_GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(explicit_layout),
string_VkImageLayout(optimal_layout));
}
}
return skip;
}
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceLayers &subLayers, VkImageLayout explicit_layout,
VkImageLayout optimal_layout, const char *caller, const char *layout_invalid_msg_code,
const char *layout_mismatch_msg_code, bool *error) const {
return VerifyImageLayout(cb_node, image_state, RangeFromLayers(subLayers), explicit_layout, optimal_layout, caller,
layout_invalid_msg_code, layout_mismatch_msg_code, error);
}
void CoreChecks::TransitionFinalSubpassLayouts(CMD_BUFFER_STATE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
FRAMEBUFFER_STATE *framebuffer_state) {
auto renderPass = GetRenderPassState(pRenderPassBegin->renderPass);
if (!renderPass) return;
const VkRenderPassCreateInfo2KHR *pRenderPassInfo = renderPass->createInfo.ptr();
if (framebuffer_state) {
IMAGE_VIEW_STATE *view_state = nullptr;
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
if (framebuffer_state->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) {
const auto attachment_info = lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pRenderPassBegin->pNext);
if (attachment_info) view_state = GetImageViewState(attachment_info->pAttachments[i]);
} else {
view_state = GetAttachmentImageViewState(pCB, framebuffer_state, i);
}
if (view_state) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_description_stencil_layout =
lvl_find_in_chain<VkAttachmentDescriptionStencilLayoutKHR>(pRenderPassInfo->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
stencil_layout = attachment_description_stencil_layout->stencilFinalLayout;
}
SetImageViewLayout(pCB, *view_state, pRenderPassInfo->pAttachments[i].finalLayout, stencil_layout);
}
}
}
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only with VK_USE_PLATFORM_ANDROID_KHR
// This could also move into a separate core_validation_android.cpp file... ?
//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
bool skip = false;
const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if (ext_fmt_android) {
if (0 != ext_fmt_android->externalFormat) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-pNext-01974",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with non-zero "
"externalFormat, but the VkImageCreateInfo's format is not VK_FORMAT_UNDEFINED.");
}
if (0 != (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT & create_info->flags)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02396",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but flags include VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
if (0 != (~VK_IMAGE_USAGE_SAMPLED_BIT & create_info->usage)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02397",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but usage includes bits (0x%" PRIx64
") other than VK_IMAGE_USAGE_SAMPLED_BIT.",
create_info->usage);
}
if (VK_IMAGE_TILING_OPTIMAL != create_info->tiling) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02398",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but layout is not VK_IMAGE_TILING_OPTIMAL.");
}
}
if ((0 != ext_fmt_android->externalFormat) &&
(ahb_ext_formats_map.find(ext_fmt_android->externalFormat) == ahb_ext_formats_map.end())) {
skip |= LogError(device, "VUID-VkExternalFormatANDROID-externalFormat-01894",
"vkCreateImage(): Chained VkExternalFormatANDROID struct contains a non-zero externalFormat (%" PRIu64
") which has "
"not been previously retrieved by vkGetAndroidHardwareBufferPropertiesANDROID().",
ext_fmt_android->externalFormat);
}
}
if ((nullptr == ext_fmt_android) || (0 == ext_fmt_android->externalFormat)) {
if (VK_FORMAT_UNDEFINED == create_info->format) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-pNext-01975",
"vkCreateImage(): VkImageCreateInfo struct's format is VK_FORMAT_UNDEFINED, but either does not have a "
"chained VkExternalFormatANDROID struct or the struct exists but has an externalFormat of 0.");
}
}
const VkExternalMemoryImageCreateInfo *emici = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(create_info->pNext);
if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
if (create_info->imageType != VK_IMAGE_TYPE_2D) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-pNext-02393",
"vkCreateImage(): VkImageCreateInfo struct with imageType %s has chained VkExternalMemoryImageCreateInfo "
"struct with handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
string_VkImageType(create_info->imageType));
}
if ((create_info->mipLevels != 1) && (create_info->mipLevels != FullMipChainLevels(create_info->extent))) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02394",
"vkCreateImage(): VkImageCreateInfo struct with chained VkExternalMemoryImageCreateInfo struct of "
"handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID "
"specifies mipLevels = %" PRId32 " (full chain mipLevels are %" PRId32 ").",
create_info->mipLevels, FullMipChainLevels(create_info->extent));
}
}
return skip;
}
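// Illustrative sketch (hypothetical values): an image create satisfying the external-format rules
// above -- VK_FORMAT_UNDEFINED, only SAMPLED usage, OPTIMAL tiling, no MUTABLE_FORMAT flag, and an
// externalFormat previously returned by vkGetAndroidHardwareBufferPropertiesANDROID().
//
//   VkExternalFormatANDROID ext_format = {VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID};
//   ext_format.externalFormat = queried_external_format;  // hypothetical, from a prior AHB properties query
//   VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, &ext_format};
//   ici.imageType = VK_IMAGE_TYPE_2D;
//   ici.format = VK_FORMAT_UNDEFINED;
//   ici.tiling = VK_IMAGE_TILING_OPTIMAL;
//   ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
//   // extent, mipLevels, arrayLayers, samples, etc. elided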
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(create_info->image);
if (image_state->has_ahb_format) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |= LogError(create_info->image, "VUID-VkImageViewCreateInfo-image-02399",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"format member is %s and must be VK_FORMAT_UNDEFINED.",
string_VkFormat(create_info->format));
}
// Chain must include a compatible ycbcr conversion
bool conv_found = false;
uint64_t external_format = 0;
const VkSamplerYcbcrConversionInfo *ycbcr_conv_info = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info->pNext);
if (ycbcr_conv_info != nullptr) {
VkSamplerYcbcrConversion conv_handle = ycbcr_conv_info->conversion;
if (ycbcr_conversion_ahb_fmt_map.find(conv_handle) != ycbcr_conversion_ahb_fmt_map.end()) {
conv_found = true;
external_format = ycbcr_conversion_ahb_fmt_map.at(conv_handle);
}
}
if ((!conv_found) || (external_format != image_state->ahb_format)) {
skip |= LogError(create_info->image, "VUID-VkImageViewCreateInfo-image-02400",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"an externalFormat (%" PRIu64
") but needs a chained VkSamplerYcbcrConversionInfo struct with a VkSamplerYcbcrConversion created "
"with the same external format.",
image_state->ahb_format);
}
// Errors in create_info swizzles
if (IsIdentitySwizzle(create_info->components) == false) {
skip |= LogError(
create_info->image, "VUID-VkImageViewCreateInfo-image-02401",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"includes one or more non-identity component swizzles, r swizzle = %s, g swizzle = %s, b swizzle = %s, a swizzle "
"= %s.",
string_VkComponentSwizzle(create_info->components.r), string_VkComponentSwizzle(create_info->components.g),
string_VkComponentSwizzle(create_info->components.b), string_VkComponentSwizzle(create_info->components.a));
}
}
return skip;
}
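// Illustrative sketch (hypothetical handles): a view of an AHB external-format image chains a
// VkSamplerYcbcrConversionInfo whose conversion was created with the matching external format, keeps
// the view format VK_FORMAT_UNDEFINED, and uses identity component swizzles.
//
//   VkSamplerYcbcrConversionInfo conv_info = {VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO};
//   conv_info.conversion = ahb_conversion;  // hypothetical, created with the same VkExternalFormatANDROID
//   VkImageViewCreateInfo ivci = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, &conv_info};
//   ivci.image = ahb_image;
//   ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
//   ivci.format = VK_FORMAT_UNDEFINED;
//   ivci.components = {};  // zero-initialized == VK_COMPONENT_SWIZZLE_IDENTITY
//   ivci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};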
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state != nullptr) {
if (image_state->external_ahb && (0 == image_state->GetBoundMemory().size())) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-01895",
"vkGetImageSubresourceLayout(): Attempt to query layout from an image created with "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType which has not yet been "
"bound to memory.");
}
}
return skip;
}
#else
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
return false;
}
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const { return false; }
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const { return false; }
#endif // VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::ValidateImageFormatFeatures(const VkImageCreateInfo *pCreateInfo) const {
bool skip = false;
// validates based on imageCreateFormatFeatures from vkspec.html#resources-image-creation-limits
VkFormatFeatureFlags tiling_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
const VkImageTiling image_tiling = pCreateInfo->tiling;
const VkFormat image_format = pCreateInfo->format;
if (image_format == VK_FORMAT_UNDEFINED) {
        // VUID 01975 states format can't be undefined unless it is an Android externalFormat
#ifdef VK_USE_PLATFORM_ANDROID_KHR
const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(pCreateInfo->pNext);
if ((image_tiling == VK_IMAGE_TILING_OPTIMAL) && (ext_fmt_android != nullptr) && (0 != ext_fmt_android->externalFormat)) {
auto it = ahb_ext_formats_map.find(ext_fmt_android->externalFormat);
if (it != ahb_ext_formats_map.end()) {
tiling_features = it->second;
}
}
#endif
} else if (image_tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
uint64_t drm_format_modifier = 0;
const VkImageDrmFormatModifierExplicitCreateInfoEXT *drm_explicit =
lvl_find_in_chain<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo->pNext);
const VkImageDrmFormatModifierListCreateInfoEXT *drm_implicit =
lvl_find_in_chain<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo->pNext);
if (drm_explicit != nullptr) {
drm_format_modifier = drm_explicit->drmFormatModifier;
} else {
            // VUID 02261 makes sure it's either explicit or implicit in parameter checking
assert(drm_implicit != nullptr);
for (uint32_t i = 0; i < drm_implicit->drmFormatModifierCount; i++) {
drm_format_modifier |= drm_implicit->pDrmFormatModifiers[i];
}
}
VkFormatProperties2 format_properties_2 = {VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, nullptr};
VkDrmFormatModifierPropertiesListEXT drm_properties_list = {VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
nullptr};
format_properties_2.pNext = (void *)&drm_properties_list;
DispatchGetPhysicalDeviceFormatProperties2(physical_device, image_format, &format_properties_2);
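        // The first call (pDrmFormatModifierProperties still null) only fills drmFormatModifierCount;
        // after sizing the vector, the second call below fills the properties array -- the usual
        // Vulkan two-call enumeration idiom.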
std::vector<VkDrmFormatModifierPropertiesEXT> drm_properties;
drm_properties.resize(drm_properties_list.drmFormatModifierCount);
drm_properties_list.pDrmFormatModifierProperties = &drm_properties[0];
DispatchGetPhysicalDeviceFormatProperties2(physical_device, image_format, &format_properties_2);
for (uint32_t i = 0; i < drm_properties_list.drmFormatModifierCount; i++) {
if ((drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifier & drm_format_modifier) != 0) {
tiling_features |= drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifierTilingFeatures;
}
}
} else {
VkFormatProperties format_properties = GetPDFormatProperties(image_format);
tiling_features = (image_tiling == VK_IMAGE_TILING_LINEAR) ? format_properties.linearTilingFeatures
: format_properties.optimalTilingFeatures;
}
// Lack of disjoint format feature support while using the flag
if (FormatIsMultiplane(image_format) && ((pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT) != 0) &&
((tiling_features & VK_FORMAT_FEATURE_DISJOINT_BIT) == 0)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-imageCreateFormatFeatures-02260",
"vkCreateImage(): can't use VK_IMAGE_CREATE_DISJOINT_BIT because %s doesn't support "
"VK_FORMAT_FEATURE_DISJOINT_BIT based on imageCreateFormatFeatures.",
string_VkFormat(pCreateInfo->format));
}
return skip;
}
bool CoreChecks::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageANDROID(report_data, pCreateInfo);
} else { // These checks are omitted or replaced when Android HW Buffer extension is active
if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
return LogError(device, "VUID-VkImageCreateInfo-format-00943",
"vkCreateImage(): VkFormat for image must not be VK_FORMAT_UNDEFINED.");
}
}
if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) {
if (VK_IMAGE_TYPE_2D != pCreateInfo->imageType) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-00949",
"vkCreateImage(): Image type must be VK_IMAGE_TYPE_2D when VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT "
"flag bit is set");
}
if ((pCreateInfo->extent.width != pCreateInfo->extent.height) || (pCreateInfo->arrayLayers < 6)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00954",
"vkCreateImage(): If VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT flag bit is set, width (%d) must equal "
"height (%d) and arrayLayers (%d) must be >= 6.",
pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->arrayLayers);
}
}
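    // Illustrative sketch: a minimal cube-compatible create info satisfying both checks above.
    //   VkImageCreateInfo cube_ci = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO};
    //   cube_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
    //   cube_ci.imageType = VK_IMAGE_TYPE_2D;
    //   cube_ci.extent = {512, 512, 1};  // width == height
    //   cube_ci.arrayLayers = 6;         // at least 6
    //   // format, usage, mipLevels, samples, etc. elided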
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
VkImageUsageFlags attach_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.width > device_limits->maxFramebufferWidth)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00964",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image width exceeds device "
"maxFramebufferWidth.");
}
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.height > device_limits->maxFramebufferHeight)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00965",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image height exceeds device "
"maxFramebufferHeight");
}
if (device_extensions.vk_ext_fragment_density_map || device_extensions.vk_ext_fragment_density_map_2) {
uint32_t ceiling_width =
(uint32_t)ceil((float)device_limits->maxFramebufferWidth /
std::max((float)phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, 1.0f));
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.width > ceiling_width)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-usage-02559",
"vkCreateImage(): Image usage flags include a fragment density map bit and image width (%u) exceeds the "
"ceiling of device "
"maxFramebufferWidth (%u) / minFragmentDensityTexelSize.width (%u). The ceiling value: %u",
pCreateInfo->extent.width, device_limits->maxFramebufferWidth,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, ceiling_width);
}
uint32_t ceiling_height =
(uint32_t)ceil((float)device_limits->maxFramebufferHeight /
std::max((float)phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, 1.0f));
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.height > ceiling_height)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-usage-02560",
"vkCreateImage(): Image usage flags include a fragment density map bit and image height (%u) exceeds the "
"ceiling of device "
"maxFramebufferHeight (%u) / minFragmentDensityTexelSize.height (%u). The ceiling value: %u",
pCreateInfo->extent.height, device_limits->maxFramebufferHeight,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, ceiling_height);
}
}
VkImageFormatProperties format_limits = {};
VkResult result = VK_SUCCESS;
if (pCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
result = DispatchGetPhysicalDeviceImageFormatProperties(physical_device, pCreateInfo->format, pCreateInfo->imageType,
pCreateInfo->tiling, pCreateInfo->usage, pCreateInfo->flags,
&format_limits);
} else {
auto modifier_list = lvl_find_in_chain<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo->pNext);
auto explicit_modifier = lvl_find_in_chain<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo->pNext);
if (modifier_list) {
for (uint32_t i = 0; i < modifier_list->drmFormatModifierCount; i++) {
auto drm_format_modifier = lvl_init_struct<VkPhysicalDeviceImageDrmFormatModifierInfoEXT>();
drm_format_modifier.drmFormatModifier = modifier_list->pDrmFormatModifiers[i];
auto image_format_info = lvl_init_struct<VkPhysicalDeviceImageFormatInfo2>(&drm_format_modifier);
image_format_info.type = pCreateInfo->imageType;
image_format_info.format = pCreateInfo->format;
image_format_info.tiling = pCreateInfo->tiling;
image_format_info.usage = pCreateInfo->usage;
image_format_info.flags = pCreateInfo->flags;
auto image_format_properties = lvl_init_struct<VkImageFormatProperties2>();
result =
DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &image_format_info, &image_format_properties);
format_limits = image_format_properties.imageFormatProperties;
                // The application gives a list of modifiers and the driver selects one.
                // If one is wrong, stop there.
if (result != VK_SUCCESS) break;
}
} else if (explicit_modifier) {
auto drm_format_modifier = lvl_init_struct<VkPhysicalDeviceImageDrmFormatModifierInfoEXT>();
drm_format_modifier.drmFormatModifier = explicit_modifier->drmFormatModifier;
auto image_format_info = lvl_init_struct<VkPhysicalDeviceImageFormatInfo2>(&drm_format_modifier);
image_format_info.type = pCreateInfo->imageType;
image_format_info.format = pCreateInfo->format;
image_format_info.tiling = pCreateInfo->tiling;
image_format_info.usage = pCreateInfo->usage;
image_format_info.flags = pCreateInfo->flags;
auto image_format_properties = lvl_init_struct<VkImageFormatProperties2>();
result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &image_format_info, &image_format_properties);
format_limits = image_format_properties.imageFormatProperties;
}
}
// 1. vkGetPhysicalDeviceImageFormatProperties[2] only success code is VK_SUCCESS
// 2. If call returns an error, then "imageCreateImageFormatPropertiesList" is defined to be the empty list
// 3. All values in 02251 are undefined if "imageCreateImageFormatPropertiesList" is empty.
if (result != VK_SUCCESS) {
// External memory will always have a "imageCreateImageFormatPropertiesList" so skip
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (!lvl_find_in_chain<VkExternalFormatANDROID>(pCreateInfo->pNext))
#endif // VK_USE_PLATFORM_ANDROID_KHR
skip |= LogError(device, "VUID-VkImageCreateInfo-imageCreateMaxMipLevels-02251",
"vkCreateImage(): Format %s is not supported for this combination of parameters and "
"VkGetPhysicalDeviceImageFormatProperties returned back %s.",
string_VkFormat(pCreateInfo->format), string_VkResult(result));
} else {
if (pCreateInfo->mipLevels > format_limits.maxMipLevels) {
const char *format_string = string_VkFormat(pCreateInfo->format);
skip |= LogError(device, "VUID-VkImageCreateInfo-mipLevels-02255",
"vkCreateImage(): Image mip levels=%d exceed image format maxMipLevels=%d for format %s.",
pCreateInfo->mipLevels, format_limits.maxMipLevels, format_string);
}
uint64_t texel_count = (uint64_t)pCreateInfo->extent.width * (uint64_t)pCreateInfo->extent.height *
(uint64_t)pCreateInfo->extent.depth * (uint64_t)pCreateInfo->arrayLayers *
(uint64_t)pCreateInfo->samples;
uint64_t total_size = (uint64_t)std::ceil(FormatTexelSize(pCreateInfo->format) * texel_count);
// Round up to imageGranularity boundary
VkDeviceSize imageGranularity = phys_dev_props.limits.bufferImageGranularity;
uint64_t ig_mask = imageGranularity - 1;
total_size = (total_size + ig_mask) & ~ig_mask;
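        // Worked example: with bufferImageGranularity = 0x400 and total_size = 0x1234,
        // (0x1234 + 0x3FF) & ~0x3FF == 0x1400, i.e. rounded up to the next 1 KiB boundary.
        // The mask trick relies on the granularity being a power of two.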
if (total_size > format_limits.maxResourceSize) {
skip |= LogWarning(device, kVUID_Core_Image_InvalidFormatLimitsViolation,
"vkCreateImage(): resource size exceeds allowable maximum Image resource size = 0x%" PRIxLEAST64
", maximum resource size = 0x%" PRIxLEAST64 " ",
total_size, format_limits.maxResourceSize);
}
if (pCreateInfo->arrayLayers > format_limits.maxArrayLayers) {
skip |= LogError(device, "VUID-VkImageCreateInfo-arrayLayers-02256",
"vkCreateImage(): arrayLayers=%d exceeds allowable maximum supported by format of %d.",
pCreateInfo->arrayLayers, format_limits.maxArrayLayers);
}
if ((pCreateInfo->samples & format_limits.sampleCounts) == 0) {
skip |= LogError(device, "VUID-VkImageCreateInfo-samples-02258",
"vkCreateImage(): samples %s is not supported by format 0x%.8X.",
string_VkSampleCountFlagBits(pCreateInfo->samples), format_limits.sampleCounts);
}
if (pCreateInfo->extent.width > format_limits.maxExtent.width) {
skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02252",
"vkCreateImage(): extent.width %u exceeds allowable maximum image extent width %u.",
pCreateInfo->extent.width, format_limits.maxExtent.width);
}
if (pCreateInfo->extent.height > format_limits.maxExtent.height) {
skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02253",
"vkCreateImage(): extent.height %u exceeds allowable maximum image extent height %u.",
pCreateInfo->extent.height, format_limits.maxExtent.height);
}
if (pCreateInfo->extent.depth > format_limits.maxExtent.depth) {
skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02254",
"vkCreateImage(): extent.depth %u exceeds allowable maximum image extent depth %u.",
pCreateInfo->extent.depth, format_limits.maxExtent.depth);
}
}
// Tests for "Formats requiring sampler YCBCR conversion for VK_IMAGE_ASPECT_COLOR_BIT image views"
if (FormatRequiresYcbcrConversion(pCreateInfo->format)) {
if (!enabled_features.ycbcr_image_array_features.ycbcrImageArrays && pCreateInfo->arrayLayers != 1) {
const char *error_vuid = (device_extensions.vk_ext_ycbcr_image_arrays) ? "VUID-VkImageCreateInfo-format-02653"
: "VUID-VkImageCreateInfo-format-02564";
skip |= LogError(device, error_vuid,
"vkCreateImage(): arrayLayers = %d, but when the ycbcrImagesArrays feature is not enabled and using a "
"YCbCr Conversion format, arrayLayers must be 1",
pCreateInfo->arrayLayers);
}
if (pCreateInfo->mipLevels != 1) {
skip |= LogError(device, "VUID-VkImageCreateInfo-format-02561",
"vkCreateImage(): mipLevels = %d, but when using a YCbCr Conversion format, mipLevels must be 1",
pCreateInfo->arrayLayers);
}
if (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(
device, "VUID-VkImageCreateInfo-format-02562",
"vkCreateImage(): samples = %s, but when using a YCbCr Conversion format, samples must be VK_SAMPLE_COUNT_1_BIT",
string_VkSampleCountFlagBits(pCreateInfo->samples));
}
if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
skip |= LogError(
device, "VUID-VkImageCreateInfo-format-02563",
"vkCreateImage(): imageType = %s, but when using a YCbCr Conversion format, imageType must be VK_IMAGE_TYPE_2D ",
string_VkImageType(pCreateInfo->imageType));
}
}
if (device_extensions.vk_khr_maintenance2) {
if (pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT) {
if (!(FormatIsCompressed_BC(pCreateInfo->format) || FormatIsCompressed_ASTC(pCreateInfo->format) ||
FormatIsCompressed_ETC2_EAC(pCreateInfo->format))) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01572",
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, "
"format must be block, ETC or ASTC compressed, but is %s",
string_VkFormat(pCreateInfo->format));
}
if (!(pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01573",
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, "
"flags must also contain VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
}
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
skip |= ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateImage", "pCreateInfo->pQueueFamilyIndices",
"VUID-VkImageCreateInfo-sharingMode-01420");
}
if (!FormatIsMultiplane(pCreateInfo->format) && !(pCreateInfo->flags & VK_IMAGE_CREATE_ALIAS_BIT) &&
(pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-format-01577",
"vkCreateImage(): format is %s and flags are %s. The flags should not include VK_IMAGE_CREATE_DISJOINT_BIT.",
string_VkFormat(pCreateInfo->format), string_VkImageCreateFlags(pCreateInfo->flags).c_str());
}
const auto swapchain_create_info = lvl_find_in_chain<VkImageSwapchainCreateInfoKHR>(pCreateInfo->pNext);
if (swapchain_create_info != nullptr) {
if (swapchain_create_info->swapchain != VK_NULL_HANDLE) {
const SWAPCHAIN_NODE *swapchain_state = GetSwapchainState(swapchain_create_info->swapchain);
const VkSwapchainCreateFlagsKHR swapchain_flags = swapchain_state->createInfo.flags;
// Validate rest of Swapchain Image create check that require swapchain state
const char *vuid = "VUID-VkImageSwapchainCreateInfoKHR-swapchain-00995";
if (((swapchain_flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) != 0) &&
((pCreateInfo->flags & VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT) == 0)) {
skip |= LogError(
device, vuid,
"vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR flag so "
"all swapchain images must have the VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT flag set.");
}
if (((swapchain_flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) != 0) &&
((pCreateInfo->flags & VK_IMAGE_CREATE_PROTECTED_BIT) == 0)) {
skip |= LogError(device, vuid,
"vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR flag so all "
"swapchain images must have the VK_IMAGE_CREATE_PROTECTED_BIT flag set.");
}
const VkImageCreateFlags mutable_flags = (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR);
if (((swapchain_flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) != 0) &&
((pCreateInfo->flags & mutable_flags) != mutable_flags)) {
skip |= LogError(device, vuid,
"vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR flag so "
"all swapchain images must have the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT and "
"VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR flags both set.");
}
}
}
if ((pCreateInfo->flags & VK_IMAGE_CREATE_PROTECTED_BIT) != 0) {
if (enabled_features.core11.protectedMemory == VK_FALSE) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01890",
"vkCreateImage(): the protectedMemory device feature is disabled: Images cannot be created with the "
"VK_IMAGE_CREATE_PROTECTED_BIT set.");
}
const VkImageCreateFlags invalid_flags =
VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT;
if ((pCreateInfo->flags & invalid_flags) != 0) {
skip |= LogError(device, "VUID-VkImageCreateInfo-None-01891",
"vkCreateImage(): VK_IMAGE_CREATE_PROTECTED_BIT is set so no sparse create flags can be used at same "
"time (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT).");
}
}
skip |= ValidateImageFormatFeatures(pCreateInfo);
return skip;
}
void CoreChecks::PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage, VkResult result) {
if (VK_SUCCESS != result) return;
StateTracker::PostCallRecordCreateImage(device, pCreateInfo, pAllocator, pImage, result);
auto image_state = Get<IMAGE_STATE>(*pImage);
AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
}
bool CoreChecks::PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) const {
const IMAGE_STATE *image_state = GetImageState(image);
const VulkanTypedHandle obj_struct(image, kVulkanObjectTypeImage);
bool skip = false;
if (image_state) {
skip |= ValidateObjectNotInUse(image_state, obj_struct, "vkDestroyImage", "VUID-vkDestroyImage-image-01000");
}
return skip;
}
void CoreChecks::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
// Clean up validation specific data
EraseQFOReleaseBarriers<VkImageMemoryBarrier>(image);
imageLayoutMap.erase(image);
// Clean up generic image state
StateTracker::PreCallRecordDestroyImage(device, image, pAllocator);
}
bool CoreChecks::ValidateImageAttributes(const IMAGE_STATE *image_state, const VkImageSubresourceRange &range,
const char *param_name) const {
bool skip = false;
const VkImage image = image_state->image;
const VkFormat format = image_state->createInfo.format;
if (range.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= LogError(image, "VUID-vkCmdClearColorImage-aspectMask-02498",
"vkCmdClearColorImage(): %s.aspectMasks must only be set to VK_IMAGE_ASPECT_COLOR_BIT.", param_name);
}
if (FormatIsDepthOrStencil(format)) {
skip |= LogError(image, "VUID-vkCmdClearColorImage-image-00007",
"vkCmdClearColorImage(): %s called with image %s which has a depth/stencil format (%s).", param_name,
report_data->FormatHandle(image).c_str(), string_VkFormat(format));
} else if (FormatIsCompressed(format)) {
skip |= LogError(image, "VUID-vkCmdClearColorImage-image-00007",
"vkCmdClearColorImage(): %s called with image %s which has a compressed format (%s).", param_name,
report_data->FormatHandle(image).c_str(), string_VkFormat(format));
}
if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
skip |=
LogError(image, "VUID-vkCmdClearColorImage-image-00002",
"vkCmdClearColorImage() %s called with image %s which was created without VK_IMAGE_USAGE_TRANSFER_DST_BIT.",
param_name, report_data->FormatHandle(image).c_str());
}
return skip;
}
bool CoreChecks::VerifyClearImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageLayout dest_image_layout,
const char *func_name) const {
bool skip = false;
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL)) {
skip |= LogError(image_state->image, "VUID-vkCmdClearDepthStencilImage-imageLayout-00012",
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
string_VkImageLayout(dest_image_layout));
}
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
if (!device_extensions.vk_khr_shared_presentable_image) {
if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL)) {
skip |= LogError(image_state->image, "VUID-vkCmdClearColorImage-imageLayout-00005",
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
string_VkImageLayout(dest_image_layout));
}
} else {
if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL) &&
(dest_image_layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR)) {
skip |= LogError(
image_state->image, "VUID-vkCmdClearColorImage-imageLayout-01394",
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL, SHARED_PRESENT_KHR, or GENERAL.",
func_name, string_VkImageLayout(dest_image_layout));
}
}
}
// Cast to const to prevent creation at validate time.
const auto *subresource_map = GetImageSubresourceLayoutMap(cb_node, image_state->image);
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map);
VkImageSubresourceRange normalized_isr = NormalizeSubresourceRange(*image_state, range);
for (auto pos = subresource_map->Find(normalized_isr); (pos != subresource_map->End()) && !subres_skip; ++pos) {
if (!layout_check.Check(pos->subresource, dest_image_layout, pos->current_layout, pos->initial_layout)) {
const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00004";
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00011";
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
}
subres_skip |= LogError(cb_node->commandBuffer, error_code,
"%s: Cannot clear an image whose layout is %s and doesn't match the %s layout %s.",
func_name, string_VkImageLayout(dest_image_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-image-00003");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearColorImage()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdClearColorImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
if (device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearColorImage",
"VUID-vkCmdClearColorImage-image-01993");
}
skip |= InsideRenderPass(cb_node, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-renderpass");
skip |=
ValidateProtectedImage(cb_node, image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-commandBuffer-01805");
skip |= ValidateUnprotectedImage(cb_node, image_state, "vkCmdClearColorImage()",
"VUID-vkCmdClearColorImage-commandBuffer-01806");
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearColorSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= ValidateImageAttributes(image_state, pRanges[i], param_name.c_str());
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearColorImage()");
}
// Tests for "Formats requiring sampler Y’CBCR conversion for VK_IMAGE_ASPECT_COLOR_BIT image views"
if (FormatRequiresYcbcrConversion(image_state->createInfo.format)) {
skip |= LogError(device, "VUID-vkCmdClearColorImage-image-01545",
"vkCmdClearColorImage(): format (%s) must not be one of the formats requiring sampler YCBCR "
"conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
string_VkFormat(image_state->createInfo.format));
}
}
return skip;
}
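// Illustrative sketch (hypothetical handles): a clear that passes the checks above -- color aspect
// only, image created with VK_IMAGE_USAGE_TRANSFER_DST_BIT, correct layout, recorded outside a
// render pass.
//
//   VkClearColorValue clear_value = {{0.0f, 0.0f, 0.0f, 1.0f}};
//   VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//   vkCmdClearColorImage(cb, color_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_value, 1, &range);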
void CoreChecks::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
bool CoreChecks::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
const VkFormat image_format = image_state->createInfo.format;
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-image-00010");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearDepthStencilImage()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdClearDepthStencilImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
if (device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearDepthStencilImage",
"VUID-vkCmdClearDepthStencilImage-image-01994");
}
skip |= InsideRenderPass(cb_node, "vkCmdClearDepthStencilImage()", "VUID-vkCmdClearDepthStencilImage-renderpass");
skip |= ValidateProtectedImage(cb_node, image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-commandBuffer-01807");
skip |= ValidateUnprotectedImage(cb_node, image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-commandBuffer-01808");
bool any_include_aspect_depth_bit = false;
bool any_include_aspect_stencil_bit = false;
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearDepthSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
// Image aspect must be depth or stencil or both
VkImageAspectFlags valid_aspects = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if (((pRanges[i].aspectMask & valid_aspects) == 0) || ((pRanges[i].aspectMask & ~valid_aspects) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-aspectMask-02824",
"vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask can only be VK_IMAGE_ASPECT_DEPTH_BIT "
"and/or VK_IMAGE_ASPECT_STENCIL_BIT.",
i);
}
if ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) {
any_include_aspect_depth_bit = true;
if (FormatHasDepth(image_format) == false) {
skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-image-02826",
"vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask has a VK_IMAGE_ASPECT_DEPTH_BIT but %s "
"doesn't have a depth component.",
i, string_VkFormat(image_format));
}
}
if ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) {
any_include_aspect_stencil_bit = true;
if (FormatHasStencil(image_format) == false) {
skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-image-02825",
"vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask has a VK_IMAGE_ASPECT_STENCIL_BIT but "
"%s doesn't have a stencil component.",
i, string_VkFormat(image_format));
}
}
}
if (any_include_aspect_stencil_bit) {
const auto image_stencil_struct = lvl_find_in_chain<VkImageStencilUsageCreateInfoEXT>(image_state->createInfo.pNext);
if (image_stencil_struct != nullptr) {
if ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
skip |=
LogError(device, "VUID-vkCmdClearDepthStencilImage-pRanges-02658",
"vkCmdClearDepthStencilImage(): an element of pRanges.aspect includes VK_IMAGE_ASPECT_STENCIL_BIT "
"and image was created with separate stencil usage, VK_IMAGE_USAGE_TRANSFER_DST_BIT must be "
"included in VkImageStencilUsageCreateInfo::stencilUsage used to create image");
}
} else {
if ((image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
skip |= LogError(
device, "VUID-vkCmdClearDepthStencilImage-pRanges-02659",
"vkCmdClearDepthStencilImage(): an element of pRanges.aspect includes VK_IMAGE_ASPECT_STENCIL_BIT and "
"image was not created with separate stencil usage, VK_IMAGE_USAGE_TRANSFER_DST_BIT must be included "
"in VkImageCreateInfo::usage used to create image");
}
}
}
if (any_include_aspect_depth_bit && (image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
skip |= LogError(device, "VUID-vkCmdClearDepthStencilImage-pRanges-02660",
"vkCmdClearDepthStencilImage(): an element of pRanges.aspect includes VK_IMAGE_ASPECT_DEPTH_BIT, "
"VK_IMAGE_USAGE_TRANSFER_DST_BIT must be included in VkImageCreateInfo::usage used to create image");
}
        if (!FormatIsDepthOrStencil(image_format)) {
skip |= LogError(image, "VUID-vkCmdClearDepthStencilImage-image-00014",
"vkCmdClearDepthStencilImage(): called with image %s which doesn't have a depth/stencil format (%s).",
report_data->FormatHandle(image).c_str(), string_VkFormat(image_format));
}
if (VK_IMAGE_USAGE_TRANSFER_DST_BIT != (VK_IMAGE_USAGE_TRANSFER_DST_BIT & image_state->createInfo.usage)) {
skip |= LogError(image, "VUID-vkCmdClearDepthStencilImage-image-00009",
"vkCmdClearDepthStencilImage(): called with image %s which was not created with the "
"VK_IMAGE_USAGE_TRANSFER_DST_BIT set.",
report_data->FormatHandle(image).c_str());
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
// Returns true if the ranges [start, start + start_offset) and [end, end + end_offset) overlap
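// e.g. RangesIntersect(0, 4, 2, 4) is true: [0, 4) and [2, 6) share [2, 4)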
static bool RangesIntersect(int32_t start, uint32_t start_offset, int32_t end, uint32_t end_offset) {
bool result = false;
uint32_t intersection_min = std::max(static_cast<uint32_t>(start), static_cast<uint32_t>(end));
uint32_t intersection_max = std::min(static_cast<uint32_t>(start) + start_offset, static_cast<uint32_t>(end) + end_offset);
if (intersection_max > intersection_min) {
result = true;
}
return result;
}
// Returns true if source area of first copy region intersects dest area of second region
// It is assumed that these are copy regions within a single image (otherwise no possibility of collision)
static bool RegionIntersects(const VkImageCopy *rgn0, const VkImageCopy *rgn1, VkImageType type, bool is_multiplane) {
bool result = false;
// Separate planes within a multiplane image cannot intersect
if (is_multiplane && (rgn0->srcSubresource.aspectMask != rgn1->dstSubresource.aspectMask)) {
return result;
}
if ((rgn0->srcSubresource.mipLevel == rgn1->dstSubresource.mipLevel) &&
(RangesIntersect(rgn0->srcSubresource.baseArrayLayer, rgn0->srcSubresource.layerCount, rgn1->dstSubresource.baseArrayLayer,
rgn1->dstSubresource.layerCount))) {
result = true;
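        // Fall-through is intentional: a 3D image tests z, y, and x; a 2D image tests y and x; a 1D image tests only x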
switch (type) {
case VK_IMAGE_TYPE_3D:
result &= RangesIntersect(rgn0->srcOffset.z, rgn0->extent.depth, rgn1->dstOffset.z, rgn1->extent.depth);
// fall through
case VK_IMAGE_TYPE_2D:
result &= RangesIntersect(rgn0->srcOffset.y, rgn0->extent.height, rgn1->dstOffset.y, rgn1->extent.height);
// fall through
case VK_IMAGE_TYPE_1D:
result &= RangesIntersect(rgn0->srcOffset.x, rgn0->extent.width, rgn1->dstOffset.x, rgn1->extent.width);
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
}
return result;
}
// Returns non-zero if offset and extent exceed image extents
static const uint32_t x_bit = 1;
static const uint32_t y_bit = 2;
static const uint32_t z_bit = 4;
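// e.g. for an image extent of (w=16, h=16, d=1), offset (12, 0, 0) with extent (8, 1, 1) returns x_bit, since 12 + 8 > 16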
static uint32_t ExceedsBounds(const VkOffset3D *offset, const VkExtent3D *extent, const VkExtent3D *image_extent) {
uint32_t result = 0;
    // Extents cannot be negative, but the signed-sum checks are kept for clarity
if ((offset->z + extent->depth > image_extent->depth) || (offset->z < 0) ||
((offset->z + static_cast<int32_t>(extent->depth)) < 0)) {
result |= z_bit;
}
if ((offset->y + extent->height > image_extent->height) || (offset->y < 0) ||
((offset->y + static_cast<int32_t>(extent->height)) < 0)) {
result |= y_bit;
}
if ((offset->x + extent->width > image_extent->width) || (offset->x < 0) ||
((offset->x + static_cast<int32_t>(extent->width)) < 0)) {
result |= x_bit;
}
return result;
}
// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
bool result = true;
if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
(extent->depth != other_extent->depth)) {
result = false;
}
return result;
}
// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentAllZeroes(const VkExtent3D *extent) {
return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}
// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
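// e.g. a reported granularity of (1, 1, 1) on a BC1-compressed image (4x4x1 texel blocks) scales to (4, 4, 1)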
VkExtent3D CoreChecks::GetScaledItg(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img) const {
// Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
VkExtent3D granularity = {0, 0, 0};
auto pPool = cb_node->command_pool.get();
if (pPool) {
granularity = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
if (FormatIsCompressed(img->createInfo.format) || FormatIsSinglePlane_422(img->createInfo.format)) {
auto block_size = FormatTexelBlockExtent(img->createInfo.format);
granularity.width *= block_size.width;
granularity.height *= block_size.height;
}
}
return granularity;
}
// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
bool valid = true;
if ((SafeModulo(extent->depth, granularity->depth) != 0) || (SafeModulo(extent->width, granularity->width) != 0) ||
(SafeModulo(extent->height, granularity->height) != 0)) {
valid = false;
}
return valid;
}
// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
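// e.g. with a scaled granularity of (4, 4, 1), an offset of (8, 4, 0) is accepted while (2, 4, 0) is rejected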
bool CoreChecks::CheckItgOffset(const CMD_BUFFER_STATE *cb_node, const VkOffset3D *offset, const VkExtent3D *granularity,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
VkExtent3D offset_extent = {};
offset_extent.width = static_cast<uint32_t>(abs(offset->x));
offset_extent.height = static_cast<uint32_t>(abs(offset->y));
offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
if (IsExtentAllZeroes(&offset_extent) == false) {
skip |= LogError(cb_node->commandBuffer, vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) when the command buffer's queue family "
"image transfer granularity is (w=0, h=0, d=0).",
function, i, member, offset->x, offset->y, offset->z);
}
} else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be integer
        // multiples of the image transfer granularity.
if (IsExtentAligned(&offset_extent, granularity) == false) {
skip |= LogError(cb_node->commandBuffer, vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
granularity->depth);
}
}
return skip;
}
// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
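// e.g. with granularity (8, 8, 1) and a 100x60 2D subresource, extent (64, 60, 1) at offset (32, 0, 0) passes: the width is
// a multiple of 8, and offset.y + height equals the subresource height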
bool CoreChecks::CheckItgExtent(const CMD_BUFFER_STATE *cb_node, const VkExtent3D *extent, const VkOffset3D *offset,
const VkExtent3D *granularity, const VkExtent3D *subresource_extent, const VkImageType image_type,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
// subresource extent.
if (IsExtentEqual(extent, subresource_extent) == false) {
skip |= LogError(cb_node->commandBuffer, vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
"when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
subresource_extent->height, subresource_extent->depth);
}
} else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be integer
        // multiples of the image transfer granularity or the offset + extent dimensions must always match the image
        // subresource extent dimensions.
VkExtent3D offset_extent_sum = {};
offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
bool x_ok = true;
bool y_ok = true;
bool z_ok = true;
switch (image_type) {
case VK_IMAGE_TYPE_3D:
z_ok = ((0 == SafeModulo(extent->depth, granularity->depth)) ||
(subresource_extent->depth == offset_extent_sum.depth));
// fall through
case VK_IMAGE_TYPE_2D:
y_ok = ((0 == SafeModulo(extent->height, granularity->height)) ||
(subresource_extent->height == offset_extent_sum.height));
// fall through
case VK_IMAGE_TYPE_1D:
x_ok = ((0 == SafeModulo(extent->width, granularity->width)) ||
(subresource_extent->width == offset_extent_sum.width));
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
if (!(x_ok && y_ok && z_ok)) {
skip |=
LogError(cb_node->commandBuffer, vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
"extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
subresource_extent->width, subresource_extent->height, subresource_extent->depth);
}
}
return skip;
}
bool CoreChecks::ValidateImageMipLevel(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, uint32_t mip_level,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
if (mip_level >= img->createInfo.mipLevels) {
skip |= LogError(cb_node->commandBuffer, vuid, "In %s, pRegions[%u].%s.mipLevel is %u, but provided %s has %u mip levels.",
function, i, member, mip_level, report_data->FormatHandle(img->image).c_str(), img->createInfo.mipLevels);
}
return skip;
}
bool CoreChecks::ValidateImageArrayLayerRange(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, const uint32_t base_layer,
const uint32_t layer_count, const uint32_t i, const char *function,
const char *member, const char *vuid) const {
bool skip = false;
if (base_layer >= img->createInfo.arrayLayers || layer_count > img->createInfo.arrayLayers ||
(base_layer + layer_count) > img->createInfo.arrayLayers) {
skip |= LogError(cb_node->commandBuffer, vuid,
"In %s, pRegions[%u].%s.baseArrayLayer is %u and .layerCount is "
"%u, but provided %s has %u array layers.",
function, i, member, base_layer, layer_count, report_data->FormatHandle(img->image).c_str(),
img->createInfo.arrayLayers);
}
return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
bool CoreChecks::ValidateCopyBufferImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img,
const VkBufferImageCopy *region, const uint32_t i,
const char *function, const char *vuid) const {
bool skip = false;
VkExtent3D granularity = GetScaledItg(cb_node, img);
skip |= CheckItgOffset(cb_node, ®ion->imageOffset, &granularity, i, function, "imageOffset", vuid);
VkExtent3D subresource_extent = GetImageSubresourceExtent(img, ®ion->imageSubresource);
skip |= CheckItgExtent(cb_node, ®ion->imageExtent, ®ion->imageOffset, &granularity, &subresource_extent,
img->createInfo.imageType, i, function, "imageExtent", vuid);
return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
bool CoreChecks::ValidateCopyImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *src_img,
const IMAGE_STATE *dst_img, const VkImageCopy *region,
const uint32_t i, const char *function) const {
bool skip = false;
// Source image checks
VkExtent3D granularity = GetScaledItg(cb_node, src_img);
skip |=
CheckItgOffset(cb_node, ®ion->srcOffset, &granularity, i, function, "srcOffset", "VUID-vkCmdCopyImage-srcOffset-01783");
VkExtent3D subresource_extent = GetImageSubresourceExtent(src_img, ®ion->srcSubresource);
const VkExtent3D extent = region->extent;
skip |= CheckItgExtent(cb_node, &extent, ®ion->srcOffset, &granularity, &subresource_extent, src_img->createInfo.imageType,
i, function, "extent", "VUID-vkCmdCopyImage-srcOffset-01783");
// Destination image checks
granularity = GetScaledItg(cb_node, dst_img);
skip |=
CheckItgOffset(cb_node, ®ion->dstOffset, &granularity, i, function, "dstOffset", "VUID-vkCmdCopyImage-dstOffset-01784");
// Adjust dest extent, if necessary
const VkExtent3D dest_effective_extent =
GetAdjustedDestImageExtent(src_img->createInfo.format, dst_img->createInfo.format, extent);
subresource_extent = GetImageSubresourceExtent(dst_img, ®ion->dstSubresource);
skip |= CheckItgExtent(cb_node, &dest_effective_extent, ®ion->dstOffset, &granularity, &subresource_extent,
dst_img->createInfo.imageType, i, function, "extent", "VUID-vkCmdCopyImage-dstOffset-01784");
return skip;
}
// Validate contents of a VkImageCopy struct
bool CoreChecks::ValidateImageCopyData(const uint32_t regionCount, const VkImageCopy *ic_regions, const IMAGE_STATE *src_state,
const IMAGE_STATE *dst_state) const {
bool skip = false;
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageCopy region = ic_regions[i];
// For comp<->uncomp copies, the copy extent for the dest image must be adjusted
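        // e.g. per the spec's block-size adjustment, a (64, 64, 1) extent on a BC1 source (4x4 texel blocks) addresses a
        // (16, 16, 1) region of an uncompressed destination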
const VkExtent3D src_copy_extent = region.extent;
const VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_state->createInfo.format, dst_state->createInfo.format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != dst_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != src_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
// Do all checks on source image
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.srcOffset.y) || (1 != src_copy_extent.height)) {
skip |=
LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-00146",
"vkCmdCopyImage(): pRegion[%d] srcOffset.y is %d and extent.height is %d. For 1D images these must "
"be 0 and 1, respectively.",
i, region.srcOffset.y, src_copy_extent.height);
}
}
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.srcOffset.z) || (1 != src_copy_extent.depth))) {
skip |= LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-01785",
"vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d and extent.depth is %d. For 1D images "
"these must be 0 and 1, respectively.",
i, region.srcOffset.z, src_copy_extent.depth);
}
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.srcOffset.z)) {
skip |= LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-01787",
"vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d. For 2D images the z-offset must be 0.", i,
region.srcOffset.z);
}
// Source checks that apply only to compressed images (or to _422 images if ycbcr enabled)
bool ext_ycbcr = IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion);
if (FormatIsCompressed(src_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(src_state->createInfo.format))) {
const VkExtent3D block_size = FormatTexelBlockExtent(src_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.srcOffset.x, block_size.width) != 0) ||
(SafeModulo(region.srcOffset.y, block_size.height) != 0) ||
(SafeModulo(region.srcOffset.z, block_size.depth) != 0)) {
const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-srcImage-01727" : "VUID-vkCmdCopyImage-srcImage-01727";
skip |= LogError(src_state->image, vuid,
"vkCmdCopyImage(): pRegion[%d] srcOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
i, region.srcOffset.x, region.srcOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = GetImageSubresourceExtent(src_state, &(region.srcSubresource));
if ((SafeModulo(src_copy_extent.width, block_size.width) != 0) &&
(src_copy_extent.width + region.srcOffset.x != mip_extent.width)) {
const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-srcImage-01728" : "VUID-vkCmdCopyImage-srcImage-01728";
skip |=
LogError(src_state->image, vuid,
"vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"width (%d), or when added to srcOffset.x (%d) must equal the image subresource width (%d).",
i, src_copy_extent.width, block_size.width, region.srcOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or extent+offset height must equal subresource height
if ((SafeModulo(src_copy_extent.height, block_size.height) != 0) &&
(src_copy_extent.height + region.srcOffset.y != mip_extent.height)) {
const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-srcImage-01729" : "VUID-vkCmdCopyImage-srcImage-01729";
skip |=
LogError(src_state->image, vuid,
"vkCmdCopyImage(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block "
"height (%d), or when added to srcOffset.y (%d) must equal the image subresource height (%d).",
i, src_copy_extent.height, block_size.height, region.srcOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : src_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.srcOffset.z != mip_extent.depth)) {
const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-srcImage-01730" : "VUID-vkCmdCopyImage-srcImage-01730";
skip |=
LogError(src_state->image, vuid,
"vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"depth (%d), or when added to srcOffset.z (%d) must equal the image subresource depth (%d).",
i, src_copy_extent.depth, block_size.depth, region.srcOffset.z, mip_extent.depth);
}
} // Compressed
// Do all checks on dest image
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.dstOffset.y) || (1 != dst_copy_extent.height)) {
skip |= LogError(dst_state->image, "VUID-vkCmdCopyImage-dstImage-00152",
"vkCmdCopyImage(): pRegion[%d] dstOffset.y is %d and dst_copy_extent.height is %d. For 1D images "
"these must be 0 and 1, respectively.",
i, region.dstOffset.y, dst_copy_extent.height);
}
}
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.dstOffset.z) || (1 != dst_copy_extent.depth))) {
skip |=
LogError(dst_state->image, "VUID-vkCmdCopyImage-dstImage-01786",
"vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d and extent.depth is %d. For 1D images these must be 0 "
"and 1, respectively.",
i, region.dstOffset.z, dst_copy_extent.depth);
}
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.dstOffset.z)) {
skip |= LogError(dst_state->image, "VUID-vkCmdCopyImage-dstImage-01788",
"vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d. For 2D images the z-offset must be 0.", i,
region.dstOffset.z);
}
        // Layer count rules differ depending on whether VK_KHR_maintenance1 is enabled
if (device_extensions.vk_khr_maintenance1) {
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
skip |=
LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-04443",
"vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and srcSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
}
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
skip |=
LogError(dst_state->image, "VUID-vkCmdCopyImage-dstImage-04444",
"vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and dstSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
} else { // Pre maint 1
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
skip |= LogError(src_state->image, "VUID-vkCmdCopyImage-srcImage-00139",
"vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and "
"srcSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
skip |= LogError(dst_state->image, "VUID-vkCmdCopyImage-srcImage-00139",
"vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and "
"dstSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
}
// Dest checks that apply only to compressed images (or to _422 images if ycbcr enabled)
if (FormatIsCompressed(dst_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(dst_state->createInfo.format))) {
const VkExtent3D block_size = FormatTexelBlockExtent(dst_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.dstOffset.x, block_size.width) != 0) ||
(SafeModulo(region.dstOffset.y, block_size.height) != 0) ||
(SafeModulo(region.dstOffset.z, block_size.depth) != 0)) {
const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-dstImage-01731" : "VUID-vkCmdCopyImage-dstImage-01731";
skip |= LogError(dst_state->image, vuid,
"vkCmdCopyImage(): pRegion[%d] dstOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
i, region.dstOffset.x, region.dstOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = GetImageSubresourceExtent(dst_state, &(region.dstSubresource));
if ((SafeModulo(dst_copy_extent.width, block_size.width) != 0) &&
(dst_copy_extent.width + region.dstOffset.x != mip_extent.width)) {
const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-dstImage-01732" : "VUID-vkCmdCopyImage-dstImage-01732";
skip |= LogError(
dst_state->image, vuid,
"vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block width (%d), or when added to dstOffset.x (%d) must equal the image subresource width (%d).",
i, dst_copy_extent.width, block_size.width, region.dstOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or dst_copy_extent+offset height must equal subresource height
if ((SafeModulo(dst_copy_extent.height, block_size.height) != 0) &&
(dst_copy_extent.height + region.dstOffset.y != mip_extent.height)) {
const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-dstImage-01733" : "VUID-vkCmdCopyImage-dstImage-01733";
skip |= LogError(dst_state->image, vuid,
"vkCmdCopyImage(): pRegion[%d] dst_copy_extent height (%d) must be a multiple of the compressed "
"texture block height (%d), or when added to dstOffset.y (%d) must equal the image subresource "
"height (%d).",
i, dst_copy_extent.height, block_size.height, region.dstOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or dst_copy_extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : dst_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.dstOffset.z != mip_extent.depth)) {
const char *vuid = ext_ycbcr ? "VUID-vkCmdCopyImage-dstImage-01734" : "VUID-vkCmdCopyImage-dstImage-01734";
skip |= LogError(
dst_state->image, vuid,
"vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block depth (%d), or when added to dstOffset.z (%d) must equal the image subresource depth (%d).",
i, dst_copy_extent.depth, block_size.depth, region.dstOffset.z, mip_extent.depth);
}
} // Compressed
}
return skip;
}
// vkCmdCopyImage checks that only apply if the multiplane extension is enabled
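// e.g. copying from plane 1 of a 2-plane format such as VK_FORMAT_G8_B8R8_2PLANE_420_UNORM requires
// srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT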
bool CoreChecks::CopyImageMultiplaneValidation(VkCommandBuffer command_buffer, const IMAGE_STATE *src_image_state,
const IMAGE_STATE *dst_image_state, const VkImageCopy region) const {
bool skip = false;
// Neither image is multiplane
if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format))) {
// If neither image is multi-plane the aspectMask member of src and dst must match
if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Copy between non-multiplane images with differing aspectMasks ( 0x" << std::hex
<< region.srcSubresource.aspectMask << " and 0x" << region.dstSubresource.aspectMask << " )";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01551", "%s.", ss.str().c_str());
}
} else {
// Source image multiplane checks
uint32_t planes = FormatPlaneCount(src_image_state->createInfo.format);
VkImageAspectFlags aspect = region.srcSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01552", "%s.", ss.str().c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01553", "%s.", ss.str().c_str());
}
// Single-plane to multi-plane
if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (FormatIsMultiplane(dst_image_state->createInfo.format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Source image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstImage-01557", "%s.", ss.str().c_str());
}
// Dest image multiplane checks
planes = FormatPlaneCount(dst_image_state->createInfo.format);
aspect = region.dstSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstImage-01554", "%s.", ss.str().c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstImage-01555", "%s.", ss.str().c_str());
}
// Multi-plane to single-plane
if ((FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): Dest image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01556", "%s.", ss.str().c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) const {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
const VkFormat src_format = src_image_state->createInfo.format;
const VkFormat dst_format = dst_image_state->createInfo.format;
bool skip = false;
    skip |= ValidateImageCopyData(regionCount, pRegions, src_image_state, dst_image_state);
VkCommandBuffer command_buffer = cb_node->commandBuffer;
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageCopy region = pRegions[i];
// For comp/uncomp copies, the copy extent for the dest image must be adjusted
VkExtent3D src_copy_extent = region.extent;
VkExtent3D dst_copy_extent = GetAdjustedDestImageExtent(src_format, dst_format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != dst_image_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != src_image_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
skip |= ValidateImageSubresourceLayers(cb_node, ®ion.srcSubresource, "vkCmdCopyImage", "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, ®ion.dstSubresource, "vkCmdCopyImage", "dstSubresource", i);
skip |= ValidateImageMipLevel(cb_node, src_image_state, region.srcSubresource.mipLevel, i, "vkCmdCopyImage",
"srcSubresource", "VUID-vkCmdCopyImage-srcSubresource-01696");
skip |= ValidateImageMipLevel(cb_node, dst_image_state, region.dstSubresource.mipLevel, i, "vkCmdCopyImage",
"dstSubresource", "VUID-vkCmdCopyImage-dstSubresource-01697");
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, region.srcSubresource.baseArrayLayer,
region.srcSubresource.layerCount, i, "vkCmdCopyImage", "srcSubresource",
"VUID-vkCmdCopyImage-srcSubresource-01698");
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, region.dstSubresource.baseArrayLayer,
region.dstSubresource.layerCount, i, "vkCmdCopyImage", "dstSubresource",
"VUID-vkCmdCopyImage-dstSubresource-01699");
if (device_extensions.vk_khr_maintenance1) {
// No chance of mismatch if we're overriding depth slice count
if (!slice_override) {
// The number of depth slices in srcSubresource and dstSubresource must match
// Depth comes from layerCount for 1D,2D resources, from extent.depth for 3D
uint32_t src_slices =
(VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType ? src_copy_extent.depth
: region.srcSubresource.layerCount);
uint32_t dst_slices =
(VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType ? dst_copy_extent.depth
: region.dstSubresource.layerCount);
if (src_slices != dst_slices) {
skip |= LogError(command_buffer, "VUID-VkImageCopy-extent-00140",
"vkCmdCopyImage(): number of depth slices in source and destination subresources for "
"pRegions[%u] do not match.",
i);
}
}
} else {
// For each region the layerCount member of srcSubresource and dstSubresource must match
if (region.srcSubresource.layerCount != region.dstSubresource.layerCount) {
skip |= LogError(
command_buffer, "VUID-VkImageCopy-layerCount-00138",
"vkCmdCopyImage(): number of layers in source and destination subresources for pRegions[%u] do not match", i);
}
}
        // Do multiplane-specific checks, if extension enabled
        if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
            skip |= CopyImageMultiplaneValidation(command_buffer, src_image_state, dst_image_state, region);
        } else {
            // Without the multi-plane extension, the aspectMask member of srcSubresource and dstSubresource must match
            if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
                char const str[] = "vkCmdCopyImage(): Src and dest aspectMasks for each region must match";
                skip |= LogError(command_buffer, "VUID-VkImageCopy-aspectMask-00137", "%s.", str);
            }
        }
// For each region, the aspectMask member of srcSubresource must be present in the source image
if (!VerifyAspectsPresent(region.srcSubresource.aspectMask, src_format)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): pRegion[" << i
<< "] srcSubresource.aspectMask cannot specify aspects not present in source image";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-aspectMask-00142", "%s.", ss.str().c_str());
}
// For each region, the aspectMask member of dstSubresource must be present in the destination image
if (!VerifyAspectsPresent(region.dstSubresource.aspectMask, dst_format)) {
std::stringstream ss;
ss << "vkCmdCopyImage(): pRegion[" << i
<< "] dstSubresource.aspectMask cannot specify aspects not present in dest image";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-aspectMask-00143", "%s.", ss.str().c_str());
}
// Each dimension offset + extent limits must fall with image subresource extent
VkExtent3D subresource_extent = GetImageSubresourceExtent(src_image_state, &(region.srcSubresource));
if (slice_override) src_copy_extent.depth = depth_slices;
uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &src_copy_extent, &subresource_extent);
if (extent_check & x_bit) {
skip |=
LogError(command_buffer, "VUID-vkCmdCopyImage-srcOffset-00144",
"vkCmdCopyImage(): Source image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
i, region.srcOffset.x, src_copy_extent.width, subresource_extent.width);
}
if (extent_check & y_bit) {
skip |=
LogError(command_buffer, "VUID-vkCmdCopyImage-srcOffset-00145",
"vkCmdCopyImage(): Source image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
i, region.srcOffset.y, src_copy_extent.height, subresource_extent.height);
}
if (extent_check & z_bit) {
skip |=
LogError(command_buffer, "VUID-vkCmdCopyImage-srcOffset-00147",
"vkCmdCopyImage(): Source image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
i, region.srcOffset.z, src_copy_extent.depth, subresource_extent.depth);
}
// Adjust dest extent if necessary
subresource_extent = GetImageSubresourceExtent(dst_image_state, &(region.dstSubresource));
if (slice_override) dst_copy_extent.depth = depth_slices;
extent_check = ExceedsBounds(&(region.dstOffset), &dst_copy_extent, &subresource_extent);
if (extent_check & x_bit) {
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstOffset-00150",
"vkCmdCopyImage(): Dest image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
i, region.dstOffset.x, dst_copy_extent.width, subresource_extent.width);
}
if (extent_check & y_bit) {
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstOffset-00151",
"vkCmdCopyImage(): Dest image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
i, region.dstOffset.y, dst_copy_extent.height, subresource_extent.height);
}
if (extent_check & z_bit) {
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstOffset-00153",
"vkCmdCopyImage(): Dest image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
i, region.dstOffset.z, dst_copy_extent.depth, subresource_extent.depth);
}
// The union of all source regions, and the union of all destination regions, specified by the elements of regions,
// must not overlap in memory
if (src_image_state->image == dst_image_state->image) {
for (uint32_t j = 0; j < regionCount; j++) {
if (RegionIntersects(®ion, &pRegions[j], src_image_state->createInfo.imageType,
FormatIsMultiplane(src_format))) {
std::stringstream ss;
ss << "vkCmdCopyImage(): pRegions[" << i << "] src overlaps with pRegions[" << j << "].";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-pRegions-00124", "%s.", ss.str().c_str());
}
}
}
        // Check depth for 2D images: post Maintenance 1, the rule applies when both images are 2D; before, when either is 2D
if (device_extensions.vk_khr_maintenance1) {
if (((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType)) &&
(src_copy_extent.depth != 1)) {
skip |= LogError(
command_buffer, "VUID-vkCmdCopyImage-srcImage-01790",
"vkCmdCopyImage(): pRegion[%u] both srcImage and dstImage are 2D and extent.depth is %u and has to be 1", i,
src_copy_extent.depth);
}
} else {
if (((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) ||
(VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType)) &&
(src_copy_extent.depth != 1)) {
skip |= LogError(
command_buffer, "VUID-vkCmdCopyImage-srcImage-01789",
"vkCmdCopyImage(): pRegion[%u] either srcImage or dstImage is 2D and extent.depth is %u and has to be 1", i,
src_copy_extent.depth);
}
}
// Check if 2D with 3D and depth not equal to 2D layerCount
if ((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
(src_copy_extent.depth != region.srcSubresource.layerCount)) {
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-01791",
"vkCmdCopyImage(): pRegion[%u] srcImage is 2D, dstImage is 3D and extent.depth is %u and has to be "
"srcSubresource.layerCount (%u)",
i, src_copy_extent.depth, region.srcSubresource.layerCount);
} else if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType) &&
(src_copy_extent.depth != region.dstSubresource.layerCount)) {
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-dstImage-01792",
"vkCmdCopyImage(): pRegion[%u] srcImage is 3D, dstImage is 2D and extent.depth is %u and has to be "
"dstSubresource.layerCount (%u)",
i, src_copy_extent.depth, region.dstSubresource.layerCount);
}
        // Check for multi-plane format compatibility
if (FormatIsMultiplane(src_format) || FormatIsMultiplane(dst_format)) {
size_t src_format_size = 0;
size_t dst_format_size = 0;
if (FormatIsMultiplane(src_format)) {
const VkFormat planeFormat = FindMultiplaneCompatibleFormat(src_format, region.srcSubresource.aspectMask);
src_format_size = FormatElementSize(planeFormat);
} else {
src_format_size = FormatElementSize(src_format);
}
if (FormatIsMultiplane(dst_format)) {
const VkFormat planeFormat = FindMultiplaneCompatibleFormat(dst_format, region.dstSubresource.aspectMask);
dst_format_size = FormatElementSize(planeFormat);
} else {
dst_format_size = FormatElementSize(dst_format);
}
// If size is still zero, then format is invalid and will be caught in another VU
if ((src_format_size != dst_format_size) && (src_format_size != 0) && (dst_format_size != 0)) {
skip |=
LogError(command_buffer, "VUID-vkCmdCopyImage-None-01549",
"vkCmdCopyImage(): pRegions[%u] called with non-compatible image formats. "
"The src format %s with aspectMask %s is not compatible with dst format %s aspectMask %s.",
i, string_VkFormat(src_format), string_VkImageAspectFlags(region.srcSubresource.aspectMask).c_str(),
string_VkFormat(dst_format), string_VkImageAspectFlags(region.dstSubresource.aspectMask).c_str());
}
}
}
    // The formats of non-multiplane src_image and dst_image must be compatible. Formats are considered compatible if their texel
    // size in bytes is the same between both formats. For example, VK_FORMAT_R8G8B8A8_UNORM is compatible with VK_FORMAT_R32_UINT
    // because both texels are 4 bytes in size.
if (!FormatIsMultiplane(src_format) && !FormatIsMultiplane(dst_format)) {
const char *compatible_vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-vkCmdCopyImage-srcImage-01548"
: "VUID-vkCmdCopyImage-srcImage-00135";
// Depth/stencil formats must match exactly.
if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
if (src_format != dst_format) {
skip |= LogError(command_buffer, compatible_vuid,
"vkCmdCopyImage(): Depth/stencil formats must match exactly for src (%s) and dst (%s).",
string_VkFormat(src_format), string_VkFormat(dst_format));
}
} else {
if (!FormatSizesAreEqual(src_format, dst_format, regionCount, pRegions)) {
skip |= LogError(command_buffer, compatible_vuid,
"vkCmdCopyImage(): Unmatched image format sizes. "
"The src format %s has size of %zu and dst format %s has size of %zu.",
string_VkFormat(src_format), FormatElementSize(src_format), string_VkFormat(dst_format),
FormatElementSize(dst_format));
}
}
}
// Source and dest image sample counts must match
if (src_image_state->createInfo.samples != dst_image_state->createInfo.samples) {
char const str[] = "vkCmdCopyImage() called on image pair with non-identical sample counts.";
skip |= LogError(command_buffer, "VUID-vkCmdCopyImage-srcImage-00136", "%s", str);
}
skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-srcImage-00127");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-dstImage-00132");
// Validate that SRC & DST images have correct usage flags set
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyImage-srcImage-00126",
"vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyImage-dstImage-00131",
"vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
skip |= ValidateProtectedImage(cb_node, src_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-commandBuffer-01825");
skip |= ValidateProtectedImage(cb_node, dst_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-commandBuffer-01826");
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-commandBuffer-01827");
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
skip |= LogError(
command_buffer, "VUID-vkCmdCopyImage-dstImage-02542",
"vkCmdCopyImage(): srcImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
}
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
skip |= LogError(
command_buffer, "VUID-vkCmdCopyImage-dstImage-02542",
"vkCmdCopyImage(): dstImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
}
if (device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, "vkCmdCopyImage()",
"VUID-vkCmdCopyImage-srcImage-01995");
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdCopyImage()",
"VUID-vkCmdCopyImage-dstImage-01996");
}
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdCopyImage()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
skip |= InsideRenderPass(cb_node, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-renderpass");
bool hit_error = false;
const char *invalid_src_layout_vuid = (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImage-srcImageLayout-01917"
: "VUID-vkCmdCopyImage-srcImageLayout-00129";
const char *invalid_dst_layout_vuid = (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImage-dstImageLayout-01395"
: "VUID-vkCmdCopyImage-dstImageLayout-00134";
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImage()", invalid_src_layout_vuid,
"VUID-vkCmdCopyImage-srcImageLayout-00128", &hit_error);
skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyImage()", invalid_dst_layout_vuid,
"VUID-vkCmdCopyImage-dstImageLayout-00133", &hit_error);
skip |= ValidateCopyImageTransferGranularityRequirements(cb_node, src_image_state, dst_image_state, &pRegions[i], i,
"vkCmdCopyImage()");
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout);
}
}
// Returns true if sub_rect is entirely contained within rect
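// e.g. sub_rect {offset=(2, 2), extent=(4, 4)} is contained in rect {offset=(0, 0), extent=(8, 8)}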
static inline bool ContainsRect(VkRect2D rect, VkRect2D sub_rect) {
if ((sub_rect.offset.x < rect.offset.x) || (sub_rect.offset.x + sub_rect.extent.width > rect.offset.x + rect.extent.width) ||
(sub_rect.offset.y < rect.offset.y) || (sub_rect.offset.y + sub_rect.extent.height > rect.offset.y + rect.extent.height))
return false;
return true;
}
bool CoreChecks::ValidateClearAttachmentExtent(VkCommandBuffer command_buffer, uint32_t attachment_index,
const FRAMEBUFFER_STATE *framebuffer, uint32_t fb_attachment,
const VkRect2D &render_area, uint32_t rect_count,
const VkClearRect *clear_rects) const {
bool skip = false;
const IMAGE_VIEW_STATE *image_view_state = nullptr;
if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) && (fb_attachment < framebuffer->createInfo.attachmentCount)) {
image_view_state = GetAttachmentImageViewState(GetCBState(command_buffer), framebuffer, fb_attachment);
}
for (uint32_t j = 0; j < rect_count; j++) {
if (!ContainsRect(render_area, clear_rects[j].rect)) {
skip |= LogError(command_buffer, "VUID-vkCmdClearAttachments-pRects-00016",
"vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of "
"the current render pass instance.",
j);
}
if (image_view_state) {
// The layers specified by a given element of pRects must be contained within every attachment that
// pAttachments refers to
const auto attachment_layer_count = image_view_state->create_info.subresourceRange.layerCount;
if ((clear_rects[j].baseArrayLayer >= attachment_layer_count) ||
(clear_rects[j].baseArrayLayer + clear_rects[j].layerCount > attachment_layer_count)) {
skip |= LogError(command_buffer, "VUID-vkCmdClearAttachments-pRects-00017",
"vkCmdClearAttachments(): The layers defined in pRects[%d] are not contained in the layers "
"of pAttachment[%d].",
j, attachment_index);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) const {
bool skip = false;
    const CMD_BUFFER_STATE *cb_node = GetCBState(commandBuffer);
if (!cb_node) return skip;
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdClearAttachments()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdClearAttachments-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
skip |= OutsideRenderPass(cb_node, "vkCmdClearAttachments()", "VUID-vkCmdClearAttachments-renderpass");
// Validate that attachment is in reference list of active subpass
if (cb_node->activeRenderPass) {
const VkRenderPassCreateInfo2KHR *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
const uint32_t renderpass_attachment_count = renderpass_create_info->attachmentCount;
const VkSubpassDescription2KHR *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
const auto *framebuffer = cb_node->activeFramebuffer.get();
const auto &render_area = cb_node->activeRenderPassBeginInfo.renderArea;
for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) {
auto clear_desc = &pAttachments[attachment_index];
uint32_t fb_attachment = VK_ATTACHMENT_UNUSED;
            if (0 == clear_desc->aspectMask) {
                skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-requiredbitmask",
                                 "vkCmdClearAttachments() pAttachments[%u].aspectMask must not be 0.", attachment_index);
            } else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
                skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-00020",
                                 "vkCmdClearAttachments() pAttachments[%u].aspectMask must not include "
                                 "VK_IMAGE_ASPECT_METADATA_BIT.",
                                 attachment_index);
} else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
uint32_t color_attachment = VK_ATTACHMENT_UNUSED;
if (clear_desc->colorAttachment < subpass_desc->colorAttachmentCount) {
color_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment;
if ((color_attachment != VK_ATTACHMENT_UNUSED) && (color_attachment >= renderpass_attachment_count)) {
                    skip |= LogError(
                        commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02501",
                        "vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u is not VK_ATTACHMENT_UNUSED "
                        "and not a valid attachment for %s attachmentCount=%u. Subpass %u pColorAttachment[%u]=%u.",
                        attachment_index, clear_desc->colorAttachment,
                        report_data->FormatHandle(cb_node->activeRenderPass->renderPass).c_str(), renderpass_attachment_count,
                        cb_node->activeSubpass, clear_desc->colorAttachment, color_attachment);
                    color_attachment = VK_ATTACHMENT_UNUSED;  // Defensive, prevent lookup past end of renderpass attachments
}
} else {
skip |= LogError(commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02501",
"vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u out of range for %s"
" subpass %u. colorAttachmentCount=%u",
attachment_index, clear_desc->colorAttachment,
report_data->FormatHandle(cb_node->activeRenderPass->renderPass).c_str(),
cb_node->activeSubpass, subpass_desc->colorAttachmentCount);
}
fb_attachment = color_attachment;
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
(clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
char const str[] =
"vkCmdClearAttachments() aspectMask [%d] must set only VK_IMAGE_ASPECT_COLOR_BIT of a color attachment.";
skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-00019", str, attachment_index);
}
} else { // Must be depth and/or stencil
if (((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) &&
((clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT)) {
char const str[] = "vkCmdClearAttachments() aspectMask [%d] is not a valid combination of bits.";
skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-parameter", str, attachment_index);
}
if (!subpass_desc->pDepthStencilAttachment ||
(subpass_desc->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {
skip |= LogPerformanceWarning(
commandBuffer, kVUID_Core_DrawState_MissingAttachmentReference,
"vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
} else {
fb_attachment = subpass_desc->pDepthStencilAttachment->attachment;
}
}
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= ValidateClearAttachmentExtent(commandBuffer, attachment_index, framebuffer, fb_attachment, render_area,
rectCount, pRects);
}
            // Once the framebuffer attachment is known, the image view state can be looked up
if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) &&
(fb_attachment < framebuffer->createInfo.attachmentCount)) {
const IMAGE_VIEW_STATE *image_view_state =
GetAttachmentImageViewState(GetCBState(commandBuffer), framebuffer, fb_attachment);
if (image_view_state != nullptr) {
skip |= ValidateProtectedImage(cb_node, image_view_state->image_state.get(), "vkCmdClearAttachments()",
"VUID-vkCmdClearAttachments-commandBuffer-02504");
skip |= ValidateUnprotectedImage(cb_node, image_view_state->image_state.get(), "vkCmdClearAttachments()",
"VUID-vkCmdClearAttachments-commandBuffer-02505");
}
}
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) {
auto *cb_node = GetCBState(commandBuffer);
if (cb_node->activeRenderPass && (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)) {
const VkRenderPassCreateInfo2KHR *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2KHR *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
std::shared_ptr<std::vector<VkClearRect>> clear_rect_copy;
for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) {
const auto clear_desc = &pAttachments[attachment_index];
uint32_t fb_attachment = VK_ATTACHMENT_UNUSED;
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) &&
(clear_desc->colorAttachment < subpass_desc->colorAttachmentCount)) {
fb_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment;
} else if ((clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) &&
subpass_desc->pDepthStencilAttachment) {
fb_attachment = subpass_desc->pDepthStencilAttachment->attachment;
}
if (fb_attachment != VK_ATTACHMENT_UNUSED) {
if (!clear_rect_copy) {
// We need a copy of the clear rectangles that will persist until the last lambda executes
// but we want to create it as lazily as possible
clear_rect_copy.reset(new std::vector<VkClearRect>(pRects, pRects + rectCount));
}
// if a secondary level command buffer inherits the framebuffer from the primary command buffer
// (see VkCommandBufferInheritanceInfo), this validation must be deferred until queue submit time
auto val_fn = [this, commandBuffer, attachment_index, fb_attachment, rectCount, clear_rect_copy](
const CMD_BUFFER_STATE *prim_cb, const FRAMEBUFFER_STATE *fb) {
assert(rectCount == clear_rect_copy->size());
const auto &render_area = prim_cb->activeRenderPassBeginInfo.renderArea;
bool skip = false;
skip = ValidateClearAttachmentExtent(commandBuffer, attachment_index, fb, fb_attachment, render_area, rectCount,
clear_rect_copy->data());
return skip;
};
cb_node->cmd_execute_commands_functions.emplace_back(val_fn);
}
}
}
}
bool CoreChecks::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageResolve *pRegions) const {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
bool skip = false;
if (cb_node && src_image_state && dst_image_state) {
skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-srcImage-00256");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-dstImage-00258");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdResolveImage()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdResolveImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
skip |= InsideRenderPass(cb_node, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-renderpass");
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, "vkCmdResolveImage()",
"VUID-vkCmdResolveImage-dstImage-02003");
skip |=
ValidateProtectedImage(cb_node, src_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-commandBuffer-01837");
skip |=
ValidateProtectedImage(cb_node, dst_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-commandBuffer-01838");
skip |=
ValidateUnprotectedImage(cb_node, dst_image_state, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-commandBuffer-01839");
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
skip |= LogError(cb_node->commandBuffer, "vkCmdResolveImage-dstImage-02546",
"vkCmdResolveImage(): srcImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
}
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
skip |= LogError(cb_node->commandBuffer, "vkCmdResolveImage-dstImage-02546",
"vkCmdResolveImage(): dstImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
}
bool hit_error = false;
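        // Shared presentable images have a relaxed layout requirement, so select the matching VUIDs up front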
const char *invalid_src_layout_vuid =
(src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-srcImageLayout-01400"
: "VUID-vkCmdResolveImage-srcImageLayout-00261";
const char *invalid_dst_layout_vuid =
(dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-dstImageLayout-01401"
: "VUID-vkCmdResolveImage-dstImageLayout-00263";
// For each region, the number of layers in the image subresource should not be zero
// For each region, src and dest image aspect must be color only
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageResolve region = pRegions[i];
const VkImageSubresourceLayers src_subresource = region.srcSubresource;
const VkImageSubresourceLayers dst_subresource = region.dstSubresource;
skip |= ValidateImageSubresourceLayers(cb_node, &src_subresource, "vkCmdResolveImage()", "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, &dst_subresource, "vkCmdResolveImage()", "dstSubresource", i);
skip |= VerifyImageLayout(cb_node, src_image_state, src_subresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdResolveImage()", invalid_src_layout_vuid,
"VUID-vkCmdResolveImage-srcImageLayout-00260", &hit_error);
skip |= VerifyImageLayout(cb_node, dst_image_state, dst_subresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdResolveImage()", invalid_dst_layout_vuid,
"VUID-vkCmdResolveImage-dstImageLayout-00262", &hit_error);
skip |= ValidateImageMipLevel(cb_node, src_image_state, src_subresource.mipLevel, i, "vkCmdResolveImage()",
"srcSubresource", "VUID-vkCmdResolveImage-srcSubresource-01709");
skip |= ValidateImageMipLevel(cb_node, dst_image_state, dst_subresource.mipLevel, i, "vkCmdResolveImage()",
"dstSubresource", "VUID-vkCmdResolveImage-dstSubresource-01710");
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, src_subresource.baseArrayLayer,
src_subresource.layerCount, i, "vkCmdResolveImage()", "srcSubresource",
"VUID-vkCmdResolveImage-srcSubresource-01711");
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, dst_subresource.baseArrayLayer,
dst_subresource.layerCount, i, "vkCmdResolveImage()", "srcSubresource",
"VUID-vkCmdResolveImage-dstSubresource-01712");
// layer counts must match
if (src_subresource.layerCount != dst_subresource.layerCount) {
skip |= LogError(
cb_node->commandBuffer, "VUID-VkImageResolve-layerCount-00267",
"vkCmdResolveImage(): layerCount in source and destination subresource of pRegions[%u] does not match.", i);
}
// For each region, src and dest image aspect must be color only
if ((src_subresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) ||
(dst_subresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT)) {
skip |= LogError(
cb_node->commandBuffer, "VUID-VkImageResolve-aspectMask-00266",
"vkCmdResolveImage(): src and dest aspectMasks for pRegions[%u] must specify only VK_IMAGE_ASPECT_COLOR_BIT.",
i);
}
const VkImageType src_image_type = src_image_state->createInfo.imageType;
const VkImageType dst_image_type = dst_image_state->createInfo.imageType;
if ((VK_IMAGE_TYPE_3D == src_image_type) || (VK_IMAGE_TYPE_3D == dst_image_type)) {
if ((0 != src_subresource.baseArrayLayer) || (1 != src_subresource.layerCount)) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(src_image_state->image);
objlist.add(dst_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcImage-04446",
"vkCmdResolveImage(): pRegions[%u] baseArrayLayer must be 0 and layerCount must be 1 for all "
"subresources if the src or dst image is 3D.",
i);
}
if ((0 != dst_subresource.baseArrayLayer) || (1 != dst_subresource.layerCount)) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(src_image_state->image);
objlist.add(dst_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcImage-04447",
"vkCmdResolveImage(): pRegions[%u] baseArrayLayer must be 0 and layerCount must be 1 for all "
"subresources if the src or dst image is 3D.",
i);
}
}
if (VK_IMAGE_TYPE_1D == src_image_type) {
if ((pRegions[i].srcOffset.y != 0) || (pRegions[i].extent.height != 1)) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(src_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcImage-00271",
"vkCmdResolveImage(): srcImage (%s) is 1D but pRegions[%u] srcOffset.y (%d) is not 0 or "
"extent.height (%u) is not 1.",
report_data->FormatHandle(src_image_state->image).c_str(), i, pRegions[i].srcOffset.y,
pRegions[i].extent.height);
}
}
if ((VK_IMAGE_TYPE_1D == src_image_type) || (VK_IMAGE_TYPE_2D == src_image_type)) {
if ((pRegions[i].srcOffset.z != 0) || (pRegions[i].extent.depth != 1)) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(src_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcImage-00273",
"vkCmdResolveImage(): srcImage (%s) is 2D but pRegions[%u] srcOffset.z (%d) is not 0 or "
"extent.depth (%u) is not 1.",
report_data->FormatHandle(src_image_state->image).c_str(), i, pRegions[i].srcOffset.z,
pRegions[i].extent.depth);
}
}
if (VK_IMAGE_TYPE_1D == dst_image_type) {
if ((pRegions[i].dstOffset.y != 0) || (pRegions[i].extent.height != 1)) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(dst_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstImage-00276",
"vkCmdResolveImage(): dstImage (%s) is 1D but pRegions[%u] dstOffset.y (%d) is not 0 or "
"extent.height (%u) is not 1.",
report_data->FormatHandle(dst_image_state->image).c_str(), i, pRegions[i].dstOffset.y,
pRegions[i].extent.height);
}
}
if ((VK_IMAGE_TYPE_1D == dst_image_type) || (VK_IMAGE_TYPE_2D == dst_image_type)) {
if ((pRegions[i].dstOffset.z != 0) || (pRegions[i].extent.depth != 1)) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(dst_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstImage-00278",
"vkCmdResolveImage(): dstImage (%s) is 2D but pRegions[%u] dstOffset.z (%d) is not 0 or "
"extent.depth (%u) is not 1.",
report_data->FormatHandle(dst_image_state->image).c_str(), i, pRegions[i].dstOffset.z,
pRegions[i].extent.depth);
}
}
            // Each srcImage dimension offset + extent must fall within the image subresource extent
VkExtent3D subresource_extent = GetImageSubresourceExtent(src_image_state, &src_subresource);
            // MipLevel bound is checked already; adding extra errors for a "subresource extent of zero" would only confuse the
            // developer
if (src_subresource.mipLevel < src_image_state->createInfo.mipLevels) {
uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &(region.extent), &subresource_extent);
if ((extent_check & x_bit) != 0) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(src_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcOffset-00269",
"vkCmdResolveImage(): srcImage (%s) pRegions[%u] x-dimension offset [%1d] + extent [%u] "
"exceeds subResource width [%u].",
report_data->FormatHandle(src_image_state->image).c_str(), i, region.srcOffset.x,
region.extent.width, subresource_extent.width);
}
if ((extent_check & y_bit) != 0) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(src_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcOffset-00270",
"vkCmdResolveImage(): srcImage (%s) pRegions[%u] y-dimension offset [%1d] + extent [%u] "
"exceeds subResource height [%u].",
report_data->FormatHandle(src_image_state->image).c_str(), i, region.srcOffset.y,
region.extent.height, subresource_extent.height);
}
if ((extent_check & z_bit) != 0) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(src_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-srcOffset-00272",
"vkCmdResolveImage(): srcImage (%s) pRegions[%u] z-dimension offset [%1d] + extent [%u] "
"exceeds subResource depth [%u].",
report_data->FormatHandle(src_image_state->image).c_str(), i, region.srcOffset.z,
region.extent.depth, subresource_extent.depth);
}
}
            // Each dstImage dimension offset + extent must fall within the image subresource extent
subresource_extent = GetImageSubresourceExtent(dst_image_state, &dst_subresource);
            // MipLevel bound is checked already; adding extra errors for a "subresource extent of zero" would only confuse the
            // developer
if (dst_subresource.mipLevel < dst_image_state->createInfo.mipLevels) {
uint32_t extent_check = ExceedsBounds(&(region.dstOffset), &(region.extent), &subresource_extent);
if ((extent_check & x_bit) != 0) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(dst_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstOffset-00274",
"vkCmdResolveImage(): dstImage (%s) pRegions[%u] x-dimension offset [%1d] + extent [%u] "
"exceeds subResource width [%u].",
                                 report_data->FormatHandle(dst_image_state->image).c_str(), i, region.dstOffset.x,
region.extent.width, subresource_extent.width);
}
if ((extent_check & y_bit) != 0) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(dst_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstOffset-00275",
"vkCmdResolveImage(): dstImage (%s) pRegions[%u] y-dimension offset [%1d] + extent [%u] "
"exceeds subResource height [%u].",
                                 report_data->FormatHandle(dst_image_state->image).c_str(), i, region.dstOffset.y,
region.extent.height, subresource_extent.height);
}
if ((extent_check & z_bit) != 0) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(dst_image_state->image);
skip |= LogError(objlist, "VUID-vkCmdResolveImage-dstOffset-00277",
"vkCmdResolveImage(): dstImage (%s) pRegions[%u] z-dimension offset [%1d] + extent [%u] "
"exceeds subResource depth [%u].",
                                 report_data->FormatHandle(dst_image_state->image).c_str(), i, region.dstOffset.z,
region.extent.depth, subresource_extent.depth);
}
}
}
if (src_image_state->createInfo.format != dst_image_state->createInfo.format) {
skip |=
LogError(cb_node->commandBuffer, "VUID-vkCmdResolveImage-srcImage-01386",
"vkCmdResolveImage(): srcImage format (%s) and dstImage format (%s) are not the same.",
string_VkFormat(src_image_state->createInfo.format), string_VkFormat(dst_image_state->createInfo.format));
}
if (src_image_state->createInfo.imageType != dst_image_state->createInfo.imageType) {
skip |= LogWarning(cb_node->commandBuffer, kVUID_Core_DrawState_MismatchedImageType,
"vkCmdResolveImage(): srcImage type (%s) and dstImage type (%s) are not the same.",
string_VkImageType(src_image_state->createInfo.imageType),
string_VkImageType(dst_image_state->createInfo.imageType));
}
if (src_image_state->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdResolveImage-srcImage-00257",
"vkCmdResolveImage(): srcImage sample count is VK_SAMPLE_COUNT_1_BIT.");
}
if (dst_image_state->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdResolveImage-dstImage-00259",
"vkCmdResolveImage(): dstImage sample count (%s) is not VK_SAMPLE_COUNT_1_BIT.",
string_VkSampleCountFlagBits(dst_image_state->createInfo.samples));
}
} else {
assert(0);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) const {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
bool skip = false;
if (cb_node) {
skip |= ValidateCmd(cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
}
if (cb_node && src_image_state && dst_image_state) {
skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage",
"VUID-vkCmdBlitImage-srcImage-00233");
skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage",
"VUID-vkCmdBlitImage-dstImage-00234");
skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-srcImage-00220");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-dstImage-00225");
skip |=
ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdBlitImage-srcImage-00219",
"vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
skip |=
ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdBlitImage-dstImage-00224",
"vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
skip |=
ValidateCmdQueueFlags(cb_node, "vkCmdBlitImage()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdBlitImage-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
skip |= InsideRenderPass(cb_node, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-renderpass");
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_BLIT_SRC_BIT, "vkCmdBlitImage()",
"VUID-vkCmdBlitImage-srcImage-01999");
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_BLIT_DST_BIT, "vkCmdBlitImage()",
"VUID-vkCmdBlitImage-dstImage-02000");
skip |= ValidateProtectedImage(cb_node, src_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-commandBuffer-01834");
skip |= ValidateProtectedImage(cb_node, dst_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-commandBuffer-01835");
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-commandBuffer-01836");
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
skip |= LogError(
cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstImage-02545",
"vkCmdBlitImage(): srcImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
}
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
skip |= LogError(
cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstImage-02545",
"vkCmdBlitImage(): dstImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
}
// TODO: Need to validate image layouts, which will include layout validation for shared presentable images
VkFormat src_format = src_image_state->createInfo.format;
VkFormat dst_format = dst_image_state->createInfo.format;
VkImageType src_type = src_image_state->createInfo.imageType;
VkImageType dst_type = dst_image_state->createInfo.imageType;
if (VK_FILTER_LINEAR == filter) {
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT,
"vkCmdBlitImage()", "VUID-vkCmdBlitImage-filter-02001");
} else if (VK_FILTER_CUBIC_IMG == filter) {
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG,
"vkCmdBlitImage()", "VUID-vkCmdBlitImage-filter-02002");
}
if (FormatRequiresYcbcrConversion(src_format)) {
skip |= LogError(device, "VUID-vkCmdBlitImage-srcImage-01561",
"vkCmdBlitImage(): srcImage format (%s) must not be one of the formats requiring sampler YCBCR "
"conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
string_VkFormat(src_format));
}
if (FormatRequiresYcbcrConversion(dst_format)) {
skip |= LogError(device, "VUID-vkCmdBlitImage-dstImage-01562",
"vkCmdBlitImage(): dstImage format (%s) must not be one of the formats requiring sampler YCBCR "
"conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
string_VkFormat(dst_format));
}
if ((VK_FILTER_CUBIC_IMG == filter) && (VK_IMAGE_TYPE_3D != src_type)) {
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-filter-00237",
"vkCmdBlitImage(): source image type must be VK_IMAGE_TYPE_3D when cubic filtering is specified.");
}
// Validate consistency for unsigned formats
if (FormatIsUInt(src_format) != FormatIsUInt(dst_format)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has unsigned integer format, "
<< "the other one must also have unsigned integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00230", "%s.", ss.str().c_str());
}
// Validate consistency for signed formats
if (FormatIsSInt(src_format) != FormatIsSInt(dst_format)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has signed integer format, "
<< "the other one must also have signed integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00229", "%s.", ss.str().c_str());
}
// Validate filter for Depth/Stencil formats
if (FormatIsDepthOrStencil(src_format) && (filter != VK_FILTER_NEAREST)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): If the format of srcImage is a depth, stencil, or depth stencil "
<< "then filter must be VK_FILTER_NEAREST.";
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00232", "%s.", ss.str().c_str());
}
// Validate aspect bits and formats for depth/stencil images
if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
if (src_format != dst_format) {
std::stringstream ss;
ss << "vkCmdBlitImage(): If one of srcImage and dstImage images has a format of depth, stencil or depth "
<< "stencil, the other one must have exactly the same format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is "
<< string_VkFormat(dst_format);
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00231", "%s.", ss.str().c_str());
}
} // Depth or Stencil
// Do per-region checks
const char *invalid_src_layout_vuid =
(src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-srcImageLayout-01398"
: "VUID-vkCmdBlitImage-srcImageLayout-00222";
const char *invalid_dst_layout_vuid =
(dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-dstImageLayout-01399"
: "VUID-vkCmdBlitImage-dstImageLayout-00227";
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageBlit rgn = pRegions[i];
bool hit_error = false;
skip |= VerifyImageLayout(cb_node, src_image_state, rgn.srcSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdBlitImage()", invalid_src_layout_vuid,
"VUID-vkCmdBlitImage-srcImageLayout-00221", &hit_error);
skip |= VerifyImageLayout(cb_node, dst_image_state, rgn.dstSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdBlitImage()", invalid_dst_layout_vuid,
"VUID-vkCmdBlitImage-dstImageLayout-00226", &hit_error);
skip |= ValidateImageSubresourceLayers(cb_node, &rgn.srcSubresource, "vkCmdBlitImage()", "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, &rgn.dstSubresource, "vkCmdBlitImage()", "dstSubresource", i);
skip |= ValidateImageMipLevel(cb_node, src_image_state, rgn.srcSubresource.mipLevel, i, "vkCmdBlitImage()",
"srcSubresource", "VUID-vkCmdBlitImage-srcSubresource-01705");
skip |= ValidateImageMipLevel(cb_node, dst_image_state, rgn.dstSubresource.mipLevel, i, "vkCmdBlitImage()",
"dstSubresource", "VUID-vkCmdBlitImage-dstSubresource-01706");
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, rgn.srcSubresource.baseArrayLayer,
rgn.srcSubresource.layerCount, i, "vkCmdBlitImage()", "srcSubresource",
"VUID-vkCmdBlitImage-srcSubresource-01707");
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, rgn.dstSubresource.baseArrayLayer,
rgn.dstSubresource.layerCount, i, "vkCmdBlitImage()", "dstSubresource",
"VUID-vkCmdBlitImage-dstSubresource-01708");
// Warn for zero-sized regions
if ((rgn.srcOffsets[0].x == rgn.srcOffsets[1].x) || (rgn.srcOffsets[0].y == rgn.srcOffsets[1].y) ||
(rgn.srcOffsets[0].z == rgn.srcOffsets[1].z)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): pRegions[" << i << "].srcOffsets specify a zero-volume area.";
skip |= LogWarning(cb_node->commandBuffer, kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
if ((rgn.dstOffsets[0].x == rgn.dstOffsets[1].x) || (rgn.dstOffsets[0].y == rgn.dstOffsets[1].y) ||
(rgn.dstOffsets[0].z == rgn.dstOffsets[1].z)) {
std::stringstream ss;
ss << "vkCmdBlitImage(): pRegions[" << i << "].dstOffsets specify a zero-volume area.";
skip |= LogWarning(cb_node->commandBuffer, kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
// Check that src/dst layercounts match
if (rgn.srcSubresource.layerCount != rgn.dstSubresource.layerCount) {
skip |= LogError(
cb_node->commandBuffer, "VUID-VkImageBlit-layerCount-00239",
"vkCmdBlitImage(): layerCount in source and destination subresource of pRegions[%d] does not match.", i);
}
if (rgn.srcSubresource.aspectMask != rgn.dstSubresource.aspectMask) {
skip |= LogError(cb_node->commandBuffer, "VUID-VkImageBlit-aspectMask-00238",
"vkCmdBlitImage(): aspectMask members for pRegion[%d] do not match.", i);
}
if (!VerifyAspectsPresent(rgn.srcSubresource.aspectMask, src_format)) {
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-aspectMask-00241",
"vkCmdBlitImage(): region [%d] source aspectMask (0x%x) specifies aspects not present in source "
"image format %s.",
i, rgn.srcSubresource.aspectMask, string_VkFormat(src_format));
}
if (!VerifyAspectsPresent(rgn.dstSubresource.aspectMask, dst_format)) {
skip |= LogError(
cb_node->commandBuffer, "VUID-vkCmdBlitImage-aspectMask-00242",
"vkCmdBlitImage(): region [%d] dest aspectMask (0x%x) specifies aspects not present in dest image format %s.",
i, rgn.dstSubresource.aspectMask, string_VkFormat(dst_format));
}
// Validate source image offsets
VkExtent3D src_extent = GetImageSubresourceExtent(src_image_state, &(rgn.srcSubresource));
if (VK_IMAGE_TYPE_1D == src_type) {
if ((0 != rgn.srcOffsets[0].y) || (1 != rgn.srcOffsets[1].y)) {
skip |=
LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00245",
"vkCmdBlitImage(): region [%d], source image of type VK_IMAGE_TYPE_1D with srcOffset[].y values "
"of (%1d, %1d). These must be (0, 1).",
i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == src_type) || (VK_IMAGE_TYPE_2D == src_type)) {
if ((0 != rgn.srcOffsets[0].z) || (1 != rgn.srcOffsets[1].z)) {
skip |=
LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00247",
"vkCmdBlitImage(): region [%d], source image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"srcOffset[].z values of (%1d, %1d). These must be (0, 1).",
i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z);
}
}
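            // Track per-axis out-of-bounds results so a single summary error can also be emitted below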
bool oob = false;
if ((rgn.srcOffsets[0].x < 0) || (rgn.srcOffsets[0].x > static_cast<int32_t>(src_extent.width)) ||
(rgn.srcOffsets[1].x < 0) || (rgn.srcOffsets[1].x > static_cast<int32_t>(src_extent.width))) {
oob = true;
skip |= LogError(
cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcOffset-00243",
"vkCmdBlitImage(): region [%d] srcOffset[].x values (%1d, %1d) exceed srcSubresource width extent (%1d).", i,
rgn.srcOffsets[0].x, rgn.srcOffsets[1].x, src_extent.width);
}
if ((rgn.srcOffsets[0].y < 0) || (rgn.srcOffsets[0].y > static_cast<int32_t>(src_extent.height)) ||
(rgn.srcOffsets[1].y < 0) || (rgn.srcOffsets[1].y > static_cast<int32_t>(src_extent.height))) {
oob = true;
skip |= LogError(
cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcOffset-00244",
"vkCmdBlitImage(): region [%d] srcOffset[].y values (%1d, %1d) exceed srcSubresource height extent (%1d).", i,
rgn.srcOffsets[0].y, rgn.srcOffsets[1].y, src_extent.height);
}
if ((rgn.srcOffsets[0].z < 0) || (rgn.srcOffsets[0].z > static_cast<int32_t>(src_extent.depth)) ||
(rgn.srcOffsets[1].z < 0) || (rgn.srcOffsets[1].z > static_cast<int32_t>(src_extent.depth))) {
oob = true;
skip |= LogError(
cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcOffset-00246",
"vkCmdBlitImage(): region [%d] srcOffset[].z values (%1d, %1d) exceed srcSubresource depth extent (%1d).", i,
rgn.srcOffsets[0].z, rgn.srcOffsets[1].z, src_extent.depth);
}
if (oob) {
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-pRegions-00215",
"vkCmdBlitImage(): region [%d] source image blit region exceeds image dimensions.", i);
}
// Validate dest image offsets
VkExtent3D dst_extent = GetImageSubresourceExtent(dst_image_state, &(rgn.dstSubresource));
if (VK_IMAGE_TYPE_1D == dst_type) {
if ((0 != rgn.dstOffsets[0].y) || (1 != rgn.dstOffsets[1].y)) {
skip |=
LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstImage-00250",
"vkCmdBlitImage(): region [%d], dest image of type VK_IMAGE_TYPE_1D with dstOffset[].y values of "
"(%1d, %1d). These must be (0, 1).",
i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == dst_type) || (VK_IMAGE_TYPE_2D == dst_type)) {
if ((0 != rgn.dstOffsets[0].z) || (1 != rgn.dstOffsets[1].z)) {
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstImage-00252",
"vkCmdBlitImage(): region [%d], dest image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"dstOffset[].z values of (%1d, %1d). These must be (0, 1).",
i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z);
}
}
oob = false;
if ((rgn.dstOffsets[0].x < 0) || (rgn.dstOffsets[0].x > static_cast<int32_t>(dst_extent.width)) ||
(rgn.dstOffsets[1].x < 0) || (rgn.dstOffsets[1].x > static_cast<int32_t>(dst_extent.width))) {
oob = true;
skip |= LogError(
cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstOffset-00248",
"vkCmdBlitImage(): region [%d] dstOffset[].x values (%1d, %1d) exceed dstSubresource width extent (%1d).", i,
rgn.dstOffsets[0].x, rgn.dstOffsets[1].x, dst_extent.width);
}
if ((rgn.dstOffsets[0].y < 0) || (rgn.dstOffsets[0].y > static_cast<int32_t>(dst_extent.height)) ||
(rgn.dstOffsets[1].y < 0) || (rgn.dstOffsets[1].y > static_cast<int32_t>(dst_extent.height))) {
oob = true;
skip |= LogError(
cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstOffset-00249",
"vkCmdBlitImage(): region [%d] dstOffset[].y values (%1d, %1d) exceed dstSubresource height extent (%1d).", i,
rgn.dstOffsets[0].y, rgn.dstOffsets[1].y, dst_extent.height);
}
if ((rgn.dstOffsets[0].z < 0) || (rgn.dstOffsets[0].z > static_cast<int32_t>(dst_extent.depth)) ||
(rgn.dstOffsets[1].z < 0) || (rgn.dstOffsets[1].z > static_cast<int32_t>(dst_extent.depth))) {
oob = true;
skip |= LogError(
cb_node->commandBuffer, "VUID-vkCmdBlitImage-dstOffset-00251",
"vkCmdBlitImage(): region [%d] dstOffset[].z values (%1d, %1d) exceed dstSubresource depth extent (%1d).", i,
rgn.dstOffsets[0].z, rgn.dstOffsets[1].z, dst_extent.depth);
}
if (oob) {
skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-pRegions-00216",
"vkCmdBlitImage(): region [%d] destination image blit region exceeds image dimensions.", i);
}
if ((VK_IMAGE_TYPE_3D == src_type) || (VK_IMAGE_TYPE_3D == dst_type)) {
if ((0 != rgn.srcSubresource.baseArrayLayer) || (1 != rgn.srcSubresource.layerCount) ||
(0 != rgn.dstSubresource.baseArrayLayer) || (1 != rgn.dstSubresource.layerCount)) {
skip |=
LogError(cb_node->commandBuffer, "VUID-vkCmdBlitImage-srcImage-00240",
"vkCmdBlitImage(): region [%d] blit to/from a 3D image type with a non-zero baseArrayLayer, or a "
"layerCount other than 1.",
i);
}
}
} // per-region checks
} else {
assert(0);
}
return skip;
}
void CoreChecks::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) {
StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions, filter);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout);
}
}
GlobalImageLayoutRangeMap *GetLayoutRangeMap(GlobalImageLayoutMap *map, const IMAGE_STATE &image_state) {
assert(map);
    // This approach allows a single hash lookup to either find the existing entry or create a new one
auto inserted = map->emplace(std::make_pair(image_state.image, nullptr));
if (inserted.second) {
assert(nullptr == inserted.first->second.get());
GlobalImageLayoutRangeMap *layout_map = new GlobalImageLayoutRangeMap(image_state.subresource_encoder.SubresourceCount());
inserted.first->second.reset(layout_map);
return layout_map;
} else {
assert(nullptr != inserted.first->second.get());
return inserted.first->second.get();
}
}
const GlobalImageLayoutRangeMap *GetLayoutRangeMap(const GlobalImageLayoutMap &map, VkImage image) {
auto it = map.find(image);
if (it != map.end()) {
return it->second.get();
}
return nullptr;
}
// This validates that the initial layout specified in the command buffer for the IMAGE is the same as the global IMAGE layout
bool CoreChecks::ValidateCmdBufImageLayouts(const CMD_BUFFER_STATE *pCB, const GlobalImageLayoutMap &globalImageLayoutMap,
GlobalImageLayoutMap *overlayLayoutMap_arg) const {
if (disabled[image_layout_validation]) return false;
bool skip = false;
GlobalImageLayoutMap &overlayLayoutMap = *overlayLayoutMap_arg;
// Iterate over the layout maps for each referenced image
GlobalImageLayoutRangeMap empty_map(1);
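    // empty_map stands in for images that do not yet have an entry in the global layout map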
for (const auto &layout_map_entry : pCB->image_layout_map) {
const auto image = layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't check layouts of a dead image
const auto &subres_map = layout_map_entry.second;
const auto &initial_layout_map = subres_map->GetInitialLayoutMap();
// Validate the initial_uses for each subresource referenced
if (initial_layout_map.empty()) continue;
auto *overlay_map = GetLayoutRangeMap(&overlayLayoutMap, *image_state);
const auto *global_map = GetLayoutRangeMap(globalImageLayoutMap, image);
if (global_map == nullptr) {
global_map = &empty_map;
}
// Note: don't know if it would matter
// if (global_map->empty() && overlay_map->empty()) // skip this next loop...;
auto pos = initial_layout_map.begin();
const auto end = initial_layout_map.end();
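        // Walk the overlay and global maps in lockstep; the overlay map (pos_A) takes precedence over the global map (pos_B)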
sparse_container::parallel_iterator<const ImageSubresourceLayoutMap::LayoutMap> current_layout(*overlay_map, *global_map,
pos->first.begin);
while (pos != end) {
VkImageLayout initial_layout = pos->second;
VkImageLayout image_layout = kInvalidLayout;
if (current_layout->range.empty()) break; // When we are past the end of data in overlay and global... stop looking
if (current_layout->pos_A->valid) { // pos_A denotes the overlay map in the parallel iterator
image_layout = current_layout->pos_A->lower_bound->second;
} else if (current_layout->pos_B->valid) { // pos_B denotes the global map in the parallel iterator
image_layout = current_layout->pos_B->lower_bound->second;
}
const auto intersected_range = pos->first & current_layout->range;
if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
} else if (image_layout != initial_layout) {
                // Need to look up the initial layout *state* to get a bit more information
const auto *initial_layout_state = subres_map->GetSubresourceInitialLayoutState(pos->first.begin);
assert(initial_layout_state); // There's no way we should have an initial layout without matching state...
bool matches = ImageLayoutMatches(initial_layout_state->aspect_mask, image_layout, initial_layout);
if (!matches) {
// We can report all the errors for the intersected range directly
for (auto index : sparse_container::range_view<decltype(intersected_range)>(intersected_range)) {
const auto subresource = image_state->subresource_encoder.Decode(index);
skip |= LogError(
pCB->commandBuffer, kVUID_Core_DrawState_InvalidImageLayout,
"Submitted command buffer expects %s (subresource: aspectMask 0x%X array layer %u, mip level %u) "
"to be in layout %s--instead, current layout is %s.",
report_data->FormatHandle(image).c_str(), subresource.aspectMask, subresource.arrayLayer,
subresource.mipLevel, string_VkImageLayout(initial_layout), string_VkImageLayout(image_layout));
}
}
}
if (pos->first.includes(intersected_range.end)) {
current_layout.seek(intersected_range.end);
} else {
++pos;
if (pos != end) {
current_layout.seek(pos->first.begin);
}
}
}
// Update all layout set operations (which will be a subset of the initial_layouts)
sparse_container::splice(overlay_map, subres_map->GetCurrentLayoutMap(), sparse_container::value_precedence::prefer_source);
}
return skip;
}
void CoreChecks::UpdateCmdBufImageLayouts(CMD_BUFFER_STATE *pCB) {
for (const auto &layout_map_entry : pCB->image_layout_map) {
const auto image = layout_map_entry.first;
const auto &subres_map = layout_map_entry.second;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
auto *global_map = GetLayoutRangeMap(&imageLayoutMap, *image_state);
sparse_container::splice(global_map, subres_map->GetCurrentLayoutMap(), sparse_container::value_precedence::prefer_source);
}
}
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that READ_ONLY
// layout attachments don't have CLEAR as their loadOp.
bool CoreChecks::ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version,
const VkImageLayout first_layout, const uint32_t attachment,
const VkAttachmentDescription2KHR &attachment_description) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
// Verify that initial loadOp on READ_ONLY attachments is not CLEAR
    // For both loadOp and stencilLoadOp, rp2 covers this in one VU while rp1 has it in two VUs, half of which sit behind the
    // Maintenance2 extension. The VUIDs below are listed in the following order: rp2 -> rp1 with Maintenance2 -> rp1 with no
    // extension
if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-02522",
"vkCreateRenderPass2(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && (device_extensions.vk_khr_maintenance2) &&
(first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL)) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-01566",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-00836",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
}
}
// Same as above for loadOp, but for stencilLoadOp
if (attachment_description.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-02523",
"vkCreateRenderPass2(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && (device_extensions.vk_khr_maintenance2) &&
(first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-01567",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-02511",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
}
}
return skip;
}
// Helper function to validate correct usage bits set for buffers or images. Verify that (actual & desired) flags != 0 or, if strict
// is true, verify that (actual & desired) flags == desired
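// For example, with desired = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, strict == false accepts a
// resource created with either bit set, while strict == true requires both bits to be present.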
template <typename T1>
bool CoreChecks::ValidateUsageFlags(VkFlags actual, VkFlags desired, VkBool32 strict, const T1 object,
const VulkanTypedHandle &typed_handle, const char *msgCode, char const *func_name,
char const *usage_str) const {
bool correct_usage = false;
bool skip = false;
const char *type_str = object_string[typed_handle.type];
if (strict) {
correct_usage = ((actual & desired) == desired);
} else {
correct_usage = ((actual & desired) != 0);
}
if (!correct_usage) {
// All callers should have a valid VUID
assert(msgCode != kVUIDUndefined);
skip =
LogError(object, msgCode, "Invalid usage flag for %s used by %s. In this case, %s should have %s set during creation.",
report_data->FormatHandle(typed_handle).c_str(), func_name, type_str, usage_str);
}
return skip;
}
// Helper function to validate usage flags for images. For the given image_state, send actual vs. desired usage off to the helper
// above, where an error will be flagged if usage is not correct
bool CoreChecks::ValidateImageUsageFlags(IMAGE_STATE const *image_state, VkFlags desired, bool strict, const char *msgCode,
char const *func_name, char const *usage_string) const {
return ValidateUsageFlags(image_state->createInfo.usage, desired, strict, image_state->image,
VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage), msgCode, func_name, usage_string);
}
bool CoreChecks::ValidateImageFormatFeatureFlags(IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
char const *func_name, const char *vuid) const {
bool skip = false;
const VkFormatFeatureFlags image_format_features = image_state->format_features;
if ((image_format_features & desired) != desired) {
// Same error, but more details if it was an AHB external format
if (image_state->has_ahb_format == true) {
skip |= LogError(image_state->image, vuid,
"In %s, VkFormatFeatureFlags (0x%08X) does not support required feature %s for the external format "
"found in VkAndroidHardwareBufferFormatPropertiesANDROID::formatFeatures used by %s.",
func_name, image_format_features, string_VkFormatFeatureFlags(desired).c_str(),
report_data->FormatHandle(image_state->image).c_str());
} else {
skip |= LogError(image_state->image, vuid,
"In %s, VkFormatFeatureFlags (0x%08X) does not support required feature %s for format %u used by %s "
"with tiling %s.",
func_name, image_format_features, string_VkFormatFeatureFlags(desired).c_str(),
image_state->createInfo.format, report_data->FormatHandle(image_state->image).c_str(),
string_VkImageTiling(image_state->createInfo.tiling));
}
}
return skip;
}
bool CoreChecks::ValidateImageSubresourceLayers(const CMD_BUFFER_STATE *cb_node, const VkImageSubresourceLayers *subresource_layers,
char const *func_name, char const *member, uint32_t i) const {
bool skip = false;
// layerCount must not be zero
if (subresource_layers->layerCount == 0) {
skip |= LogError(cb_node->commandBuffer, "VUID-VkImageSubresourceLayers-layerCount-01700",
"In %s, pRegions[%u].%s.layerCount must not be zero.", func_name, i, member);
}
// aspectMask must not contain VK_IMAGE_ASPECT_METADATA_BIT
if (subresource_layers->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
skip |= LogError(cb_node->commandBuffer, "VUID-VkImageSubresourceLayers-aspectMask-00168",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_METADATA_BIT set.", func_name, i, member);
}
// if aspectMask contains COLOR, it must not contain either DEPTH or STENCIL
if ((subresource_layers->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) &&
(subresource_layers->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
skip |= LogError(cb_node->commandBuffer, "VUID-VkImageSubresourceLayers-aspectMask-00167",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_COLOR_BIT and either VK_IMAGE_ASPECT_DEPTH_BIT or "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, i, member);
}
return skip;
}
// Helper function to validate usage flags for buffers. For the given buffer_state, send actual vs. desired usage off to the helper
// above, where an error will be flagged if usage is not correct
bool CoreChecks::ValidateBufferUsageFlags(BUFFER_STATE const *buffer_state, VkFlags desired, bool strict, const char *msgCode,
char const *func_name, char const *usage_string) const {
return ValidateUsageFlags(buffer_state->createInfo.usage, desired, strict, buffer_state->buffer,
VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), msgCode, func_name, usage_string);
}
bool CoreChecks::ValidateBufferViewRange(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo,
const VkPhysicalDeviceLimits *device_limits) const {
bool skip = false;
const VkDeviceSize &range = pCreateInfo->range;
if (range != VK_WHOLE_SIZE) {
// Range must be greater than 0
        if (range == 0) {
skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-range-00928",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be greater than 0.",
range);
}
// Range must be a multiple of the element size of format
const uint32_t format_size = FormatElementSize(pCreateInfo->format);
if (SafeModulo(range, format_size) != 0) {
skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-range-00929",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be a multiple of the element size of the format "
"(%" PRIu32 ").",
range, format_size);
}
// Range divided by the element size of format must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements
if (SafeDivision(range, format_size) > device_limits->maxTexelBufferElements) {
skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-range-00930",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range divided by the element size of the format (%" PRIu32
") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").",
range, format_size, device_limits->maxTexelBufferElements);
}
// The sum of range and offset must be less than or equal to the size of buffer
if (range + pCreateInfo->offset > buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-offset-00931",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, the sum of offset (%" PRIuLEAST64
") and range must be less than or equal to the size of the buffer (%" PRIuLEAST64 ").",
range, pCreateInfo->offset, buffer_state->createInfo.size);
}
} else {
const uint32_t format_size = FormatElementSize(pCreateInfo->format);
// Size of buffer - offset, divided by the element size of format must be less than or equal to
// VkPhysicalDeviceLimits::maxTexelBufferElements
if (SafeDivision(buffer_state->createInfo.size - pCreateInfo->offset, format_size) >
device_limits->maxTexelBufferElements) {
skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-range-04059",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") equals VK_WHOLE_SIZE, the buffer's size (%" PRIuLEAST64 ") minus the offset (%" PRIuLEAST64
"), divided by the element size of the format (%" PRIu32
") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").",
range, buffer_state->createInfo.size, pCreateInfo->offset, format_size,
device_limits->maxTexelBufferElements);
}
}
return skip;
}
bool CoreChecks::ValidateBufferViewBuffer(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo) const {
bool skip = false;
const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->format);
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) {
skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-buffer-00933",
"vkCreateBufferView(): If buffer was created with `usage` containing "
"VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, format must "
"be supported for uniform texel buffers");
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) {
skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-buffer-00934",
"vkCreateBufferView(): If buffer was created with `usage` containing "
"VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, format must "
"be supported for storage texel buffers");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) const {
bool skip = false;
// TODO: Add check for "VUID-vkCreateBuffer-flags-00911" (sparse address space accounting)
auto chained_devaddr_struct = lvl_find_in_chain<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo->pNext);
if (chained_devaddr_struct) {
if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT) &&
chained_devaddr_struct->deviceAddress != 0) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-deviceAddress-02604",
"vkCreateBuffer(): Non-zero VkBufferDeviceAddressCreateInfoEXT::deviceAddress "
"requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT.");
}
}
auto chained_opaqueaddr_struct = lvl_find_in_chain<VkBufferOpaqueCaptureAddressCreateInfoKHR>(pCreateInfo->pNext);
if (chained_opaqueaddr_struct) {
if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR) &&
chained_opaqueaddr_struct->opaqueCaptureAddress != 0) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-opaqueCaptureAddress-03337",
"vkCreateBuffer(): Non-zero VkBufferOpaqueCaptureAddressCreateInfoKHR::opaqueCaptureAddress"
"requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR.");
}
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR) &&
!enabled_features.core12.bufferDeviceAddressCaptureReplay &&
!enabled_features.buffer_device_address_ext.bufferDeviceAddressCaptureReplay) {
skip |= LogError(
device, "VUID-VkBufferCreateInfo-flags-03338",
"vkCreateBuffer(): the bufferDeviceAddressCaptureReplay device feature is disabled: Buffers cannot be created with "
"the VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT set.");
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
skip |= ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices",
"VUID-VkBufferCreateInfo-sharingMode-01419");
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_PROTECTED_BIT) != 0) {
if (enabled_features.core11.protectedMemory == VK_FALSE) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-flags-01887",
"vkCreateBuffer(): the protectedMemory device feature is disabled: Buffers cannot be created with the "
"VK_BUFFER_CREATE_PROTECTED_BIT set.");
}
const VkBufferCreateFlags invalid_flags =
VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_ALIASED_BIT;
if ((pCreateInfo->flags & invalid_flags) != 0) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-None-01888",
"vkCreateBuffer(): VK_BUFFER_CREATE_PROTECTED_BIT is set so no sparse create flags can be used at "
"same time (VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | "
"VK_BUFFER_CREATE_SPARSE_ALIASED_BIT).");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBufferView *pView) const {
bool skip = false;
const BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer);
// If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
if (buffer_state) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCreateBufferView()", "VUID-VkBufferViewCreateInfo-buffer-00935");
// In order to create a valid buffer view, the buffer must have been created with at least one of the following flags:
// UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
skip |= ValidateBufferUsageFlags(buffer_state,
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
"VUID-VkBufferViewCreateInfo-buffer-00932", "vkCreateBufferView()",
"VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
// Buffer view offset must be less than the size of buffer
if (pCreateInfo->offset >= buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-VkBufferViewCreateInfo-offset-00925",
"vkCreateBufferView(): VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be less than the size of the buffer (%" PRIuLEAST64 ").",
pCreateInfo->offset, buffer_state->createInfo.size);
}
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
// Buffer view offset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment
if ((pCreateInfo->offset % device_limits->minTexelBufferOffsetAlignment) != 0 &&
!enabled_features.texel_buffer_alignment_features.texelBufferAlignment) {
const char *vuid = device_extensions.vk_ext_texel_buffer_alignment ? "VUID-VkBufferViewCreateInfo-offset-02749"
: "VUID-VkBufferViewCreateInfo-offset-00926";
skip |= LogError(buffer_state->buffer, vuid,
"vkCreateBufferView(): VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment (%" PRIuLEAST64 ").",
pCreateInfo->offset, device_limits->minTexelBufferOffsetAlignment);
}
if (enabled_features.texel_buffer_alignment_features.texelBufferAlignment) {
VkDeviceSize elementSize = FormatElementSize(pCreateInfo->format);
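            // If the texel size is a multiple of three bytes (e.g. a three-component format such as VK_FORMAT_R8G8B8_UNORM),
            // the size of a single component is used for the alignment checks below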
if ((elementSize % 3) == 0) {
elementSize /= 3;
}
if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) {
VkDeviceSize alignmentRequirement =
phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes;
if (phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment) {
alignmentRequirement = std::min(alignmentRequirement, elementSize);
}
if (SafeModulo(pCreateInfo->offset, alignmentRequirement) != 0) {
skip |= LogError(
buffer_state->buffer, "VUID-VkBufferViewCreateInfo-buffer-02750",
"vkCreateBufferView(): If buffer was created with usage containing "
"VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, "
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of the lesser of "
"VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64
") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetSingleTexelAlignment "
"(%" PRId32
") is VK_TRUE, the size of a texel of the requested format. "
"If the size of a texel is a multiple of three bytes, then the size of a "
"single component of format is used instead",
pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes,
phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment);
}
}
if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) {
VkDeviceSize alignmentRequirement =
phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes;
if (phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment) {
alignmentRequirement = std::min(alignmentRequirement, elementSize);
}
if (SafeModulo(pCreateInfo->offset, alignmentRequirement) != 0) {
skip |= LogError(
buffer_state->buffer, "VUID-VkBufferViewCreateInfo-buffer-02751",
"vkCreateBufferView(): If buffer was created with usage containing "
"VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, "
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of the lesser of "
"VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64
") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetSingleTexelAlignment "
"(%" PRId32
") is VK_TRUE, the size of a texel of the requested format. "
"If the size of a texel is a multiple of three bytes, then the size of a "
"single component of format is used instead",
pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes,
phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment);
}
}
}
skip |= ValidateBufferViewRange(buffer_state, pCreateInfo, device_limits);
skip |= ValidateBufferViewBuffer(buffer_state, pCreateInfo);
}
return skip;
}
// For the given format verify that the aspect masks make sense
bool CoreChecks::ValidateImageAspectMask(VkImage image, VkFormat format, VkImageAspectFlags aspect_mask, const char *func_name,
const char *vuid) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
// checks color format and (single-plane or non-disjoint)
// if ycbcr extension is not supported then single-plane and non-disjoint are always both true
if ((FormatIsColor(format)) && ((FormatIsMultiplane(format) == false) || (image_state->disjoint == false))) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= LogError(image, vuid, "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name);
} else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
skip |= LogError(image, vuid, "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name);
}
} else if (FormatIsDepthAndStencil(format)) {
if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
skip |= LogError(image, vuid,
"%s: Depth/stencil image formats must have at least one of VK_IMAGE_ASPECT_DEPTH_BIT and "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name);
} else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name);
}
} else if (FormatIsDepthOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
skip |= LogError(image, vuid, "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name);
} else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
skip |=
LogError(image, vuid, "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name);
}
} else if (FormatIsStencilOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
skip |=
LogError(image, vuid, "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name);
} else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
skip |= LogError(image, vuid, "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name);
}
} else if (FormatIsMultiplane(format)) {
VkImageAspectFlags valid_flags = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
if (3 == FormatPlaneCount(format)) {
valid_flags = valid_flags | VK_IMAGE_ASPECT_PLANE_2_BIT;
}
if ((aspect_mask & valid_flags) != aspect_mask) {
skip |=
LogError(image, vuid,
"%s: Multi-plane image formats may have only VK_IMAGE_ASPECT_COLOR_BIT or VK_IMAGE_ASPECT_PLANE_n_BITs "
"set, where n = [0, 1, 2].",
func_name);
}
}
return skip;
}
bool CoreChecks::ValidateImageSubresourceRange(const uint32_t image_mip_count, const uint32_t image_layer_count,
const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
const char *param_name, const char *image_layer_count_var_name, const VkImage image,
SubresourceRangeErrorCodes errorCodes) const {
bool skip = false;
// Validate mip levels
if (subresourceRange.baseMipLevel >= image_mip_count) {
skip |= LogError(image, errorCodes.base_mip_err,
"%s: %s.baseMipLevel (= %" PRIu32
") is greater or equal to the mip level count of the image (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, image_mip_count);
}
if (subresourceRange.levelCount != VK_REMAINING_MIP_LEVELS) {
if (subresourceRange.levelCount == 0) {
skip |=
LogError(image, "VUID-VkImageSubresourceRange-levelCount-01720", "%s: %s.levelCount is 0.", cmd_name, param_name);
} else {
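            // Sum in 64 bits so that baseMipLevel + levelCount cannot wrap around when both values are near UINT32_MAX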
const uint64_t necessary_mip_count = uint64_t{subresourceRange.baseMipLevel} + uint64_t{subresourceRange.levelCount};
if (necessary_mip_count > image_mip_count) {
skip |= LogError(image, errorCodes.mip_count_err,
"%s: %s.baseMipLevel + .levelCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the mip level count of the image (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, subresourceRange.levelCount,
necessary_mip_count, image_mip_count);
}
}
}
// Validate array layers
if (subresourceRange.baseArrayLayer >= image_layer_count) {
skip |= LogError(image, errorCodes.base_layer_err,
"%s: %s.baseArrayLayer (= %" PRIu32
") is greater or equal to the %s of the image when it was created (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, image_layer_count_var_name, image_layer_count);
}
if (subresourceRange.layerCount != VK_REMAINING_ARRAY_LAYERS) {
if (subresourceRange.layerCount == 0) {
skip |=
LogError(image, "VUID-VkImageSubresourceRange-layerCount-01721", "%s: %s.layerCount is 0.", cmd_name, param_name);
} else {
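            // Same overflow-safe 64-bit arithmetic as the mip-level check above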
const uint64_t necessary_layer_count =
uint64_t{subresourceRange.baseArrayLayer} + uint64_t{subresourceRange.layerCount};
if (necessary_layer_count > image_layer_count) {
skip |= LogError(image, errorCodes.layer_count_err,
"%s: %s.baseArrayLayer + .layerCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the %s of the image when it was created (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, subresourceRange.layerCount,
necessary_layer_count, image_layer_count_var_name, image_layer_count);
}
}
}
return skip;
}
bool CoreChecks::ValidateCreateImageViewSubresourceRange(const IMAGE_STATE *image_state, bool is_imageview_2d_type,
const VkImageSubresourceRange &subresourceRange) const {
bool is_khr_maintenance1 = IsExtEnabled(device_extensions.vk_khr_maintenance1);
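    // VK_KHR_maintenance1 allows 2D / 2D-array views of a 3D image created with the 2D-array-compatible flag;
    // for such views, layer bounds are taken from extent.depth rather than arrayLayers.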
bool is_image_slicable = image_state->createInfo.imageType == VK_IMAGE_TYPE_3D &&
(image_state->createInfo.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR);
bool is_3D_to_2D_map = is_khr_maintenance1 && is_image_slicable && is_imageview_2d_type;
const auto image_layer_count = is_3D_to_2D_map ? image_state->createInfo.extent.depth : image_state->createInfo.arrayLayers;
const auto image_layer_count_var_name = is_3D_to_2D_map ? "extent.depth" : "arrayLayers";
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageViewCreateInfo-subresourceRange-01478";
subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageViewCreateInfo-subresourceRange-01718";
    subresourceRangeErrorCodes.base_layer_err =
        is_khr_maintenance1
            ? (is_3D_to_2D_map ? "VUID-VkImageViewCreateInfo-image-02724" : "VUID-VkImageViewCreateInfo-image-01482")
            : "VUID-VkImageViewCreateInfo-subresourceRange-01480";
    subresourceRangeErrorCodes.layer_count_err =
        is_khr_maintenance1 ? (is_3D_to_2D_map ? "VUID-VkImageViewCreateInfo-subresourceRange-02725"
                                               : "VUID-VkImageViewCreateInfo-subresourceRange-01483")
                            : "VUID-VkImageViewCreateInfo-subresourceRange-01719";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_layer_count, subresourceRange,
"vkCreateImageView", "pCreateInfo->subresourceRange", image_layer_count_var_name,
image_state->image, subresourceRangeErrorCodes);
}
bool CoreChecks::ValidateCmdClearColorSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange,
const char *param_name) const {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearColorImage-baseMipLevel-01470";
subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearColorImage-pRanges-01692";
subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearColorImage-baseArrayLayer-01472";
subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearColorImage-pRanges-01693";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
"vkCmdClearColorImage", param_name, "arrayLayers", image_state->image,
subresourceRangeErrorCodes);
}
bool CoreChecks::ValidateCmdClearDepthSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange,
const char *param_name) const {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474";
subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01694";
subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476";
subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01695";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
"vkCmdClearDepthStencilImage", param_name, "arrayLayers", image_state->image,
subresourceRangeErrorCodes);
}
bool CoreChecks::ValidateImageBarrierSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
const char *param_name) const {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageMemoryBarrier-subresourceRange-01486";
subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01724";
subresourceRangeErrorCodes.base_layer_err = "VUID-VkImageMemoryBarrier-subresourceRange-01488";
subresourceRangeErrorCodes.layer_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01725";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
cmd_name, param_name, "arrayLayers", image_state->image, subresourceRangeErrorCodes);
}
bool CoreChecks::ValidateImageViewFormatFeatures(const IMAGE_STATE *image_state, const VkFormat view_format,
const VkImageUsageFlags image_usage) const {
// Pass in image_usage here instead of extracting it from image_state in case there's a chained VkImageViewUsageCreateInfo
bool skip = false;
VkFormatFeatureFlags tiling_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
const VkImageTiling image_tiling = image_state->createInfo.tiling;
if (image_state->has_ahb_format == true) {
// AHB image view and image share same feature sets
tiling_features = image_state->format_features;
    } else if (image_tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
        // Parameter validation should catch if this is used without VK_EXT_image_drm_format_modifier
        assert(device_extensions.vk_ext_image_drm_format_modifier);
        VkImageDrmFormatModifierPropertiesEXT drm_format_properties = {VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
                                                                       nullptr};
        DispatchGetImageDrmFormatModifierPropertiesEXT(device, image_state->image, &drm_format_properties);
        VkFormatProperties2 format_properties_2 = {VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, nullptr};
        VkDrmFormatModifierPropertiesListEXT drm_properties_list = {VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
                                                                    nullptr};
        format_properties_2.pNext = (void *)&drm_properties_list;
        // Standard two-call idiom: the first call retrieves the modifier count, the second fills the allocated array.
        // Iterating the list while pDrmFormatModifierProperties is still null would dereference a null pointer.
        DispatchGetPhysicalDeviceFormatProperties2(physical_device, view_format, &format_properties_2);
        std::vector<VkDrmFormatModifierPropertiesEXT> drm_properties(drm_properties_list.drmFormatModifierCount);
        drm_properties_list.pDrmFormatModifierProperties = drm_properties.data();
        DispatchGetPhysicalDeviceFormatProperties2(physical_device, view_format, &format_properties_2);
        tiling_features = 0;  // accumulate features only from the matching modifier; the sentinel value must not survive
        for (uint32_t i = 0; i < drm_properties_list.drmFormatModifierCount; i++) {
            // DRM format modifiers are opaque 64-bit IDs, so compare for equality rather than masking bits
            if (drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifier ==
                drm_format_properties.drmFormatModifier) {
                tiling_features |= drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifierTilingFeatures;
            }
        }
} else {
VkFormatProperties format_properties = GetPDFormatProperties(view_format);
tiling_features = (image_tiling == VK_IMAGE_TILING_LINEAR) ? format_properties.linearTilingFeatures
: format_properties.optimalTilingFeatures;
}
if (tiling_features == 0) {
skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-None-02273",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s has no supported format features on this "
"physical device.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02274",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_SAMPLED_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02275",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_STORAGE_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02276",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02277",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
!(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
skip |= LogError(image_state->image, "VUID-VkImageViewCreateInfo-usage-02652",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
}
return skip;
}
bool CoreChecks::PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImageView *pView) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(pCreateInfo->image);
if (image_state) {
skip |=
ValidateImageUsageFlags(image_state,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV |
VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT,
false, "VUID-VkImageViewCreateInfo-image-04441", "vkCreateImageView()",
"VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT|"
"TRANSIENT_ATTACHMENT|SHADING_RATE_IMAGE|FRAGMENT_DENSITY_MAP]_BIT");
// If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCreateImageView()", "VUID-VkImageViewCreateInfo-image-01020");
// Checks imported from image layer
skip |= ValidateCreateImageViewSubresourceRange(
image_state, pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D || pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY,
pCreateInfo->subresourceRange);
VkImageCreateFlags image_flags = image_state->createInfo.flags;
VkFormat image_format = image_state->createInfo.format;
VkImageUsageFlags image_usage = image_state->createInfo.usage;
VkFormat view_format = pCreateInfo->format;
VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask;
VkImageType image_type = image_state->createInfo.imageType;
VkImageViewType view_type = pCreateInfo->viewType;
// If there's a chained VkImageViewUsageCreateInfo struct, modify image_usage to match
auto chained_ivuci_struct = lvl_find_in_chain<VkImageViewUsageCreateInfoKHR>(pCreateInfo->pNext);
if (chained_ivuci_struct) {
if (device_extensions.vk_khr_maintenance2) {
if (!device_extensions.vk_ext_separate_stencil_usage) {
if ((image_usage | chained_ivuci_struct->usage) != image_usage) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02661",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, usage must not "
"include any bits that were not set in VkImageCreateInfo::usage used to create image");
}
} else {
const auto image_stencil_struct =
lvl_find_in_chain<VkImageStencilUsageCreateInfoEXT>(image_state->createInfo.pNext);
if (image_stencil_struct == nullptr) {
if ((image_usage | chained_ivuci_struct->usage) != image_usage) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02662",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo and image was not created "
"with a VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, usage must not include "
"any bits that were not set in VkImageCreateInfo::usage used to create image");
}
} else {
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) == VK_IMAGE_ASPECT_STENCIL_BIT &&
(image_stencil_struct->stencilUsage | chained_ivuci_struct->usage) !=
image_stencil_struct->stencilUsage) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02663",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, image was created with a "
"VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, and subResourceRange.aspectMask "
"includes VK_IMAGE_ASPECT_STENCIL_BIT, VkImageViewUsageCreateInfo::usage must not include any "
"bits that were not set in VkImageStencilUsageCreateInfo::stencilUsage used to create image");
}
if ((aspect_mask & ~VK_IMAGE_ASPECT_STENCIL_BIT) != 0 &&
(image_usage | chained_ivuci_struct->usage) != image_usage) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02664",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, image was created with a "
"VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, and subResourceRange.aspectMask "
"includes bits other than VK_IMAGE_ASPECT_STENCIL_BIT, VkImageViewUsageCreateInfo::usage must not "
"include any bits that were not set in VkImageCreateInfo::usage used to create image");
}
}
}
}
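            // From here on, validation uses the view's effective usage rather than the image's creation usage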
image_usage = chained_ivuci_struct->usage;
}
// Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state, if view/image formats differ
if ((image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) && (image_format != view_format)) {
if (FormatIsMultiplane(image_format)) {
VkFormat compat_format = FindMultiplaneCompatibleFormat(image_format, aspect_mask);
if (view_format != compat_format) {
// View format must match the multiplane compatible format
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not compatible with plane " << GetPlaneIndex(aspect_mask) << " of underlying image format "
<< string_VkFormat(image_format) << ", must be " << string_VkFormat(compat_format) << ".";
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01586", "%s", ss.str().c_str());
}
} else {
                // In this branch the image is not multi-planar, so only the block-texel-view-compatible flag can relax
                // the compatibility-class requirement
                if (!(image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR)) {
                    // Format MUST be compatible (in the same format compatibility class) as the format the image was created with
if (FormatCompatibilityClass(image_format) != FormatCompatibilityClass(view_format)) {
const char *error_vuid;
if ((!device_extensions.vk_khr_maintenance2) && (!device_extensions.vk_khr_sampler_ycbcr_conversion)) {
error_vuid = "VUID-VkImageViewCreateInfo-image-01018";
} else if ((device_extensions.vk_khr_maintenance2) &&
(!device_extensions.vk_khr_sampler_ycbcr_conversion)) {
error_vuid = "VUID-VkImageViewCreateInfo-image-01759";
} else if ((!device_extensions.vk_khr_maintenance2) &&
(device_extensions.vk_khr_sampler_ycbcr_conversion)) {
error_vuid = "VUID-VkImageViewCreateInfo-image-01760";
} else {
// both enabled
error_vuid = "VUID-VkImageViewCreateInfo-image-01761";
}
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not in the same format compatibility class as "
<< report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
<< ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT BIT "
<< "can support ImageViews with differing formats but they must be in the same compatibility class.";
skip |= LogError(pCreateInfo->image, error_vuid, "%s", ss.str().c_str());
}
}
}
} else {
// Format MUST be IDENTICAL to the format the image was created with
// Unless it is a multi-planar color bit aspect
if ((image_format != view_format) &&
((FormatIsMultiplane(image_format) == false) || (aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT))) {
const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-VkImageViewCreateInfo-image-01762"
: "VUID-VkImageViewCreateInfo-image-01019";
std::stringstream ss;
ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from "
<< report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
<< ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.";
skip |= LogError(pCreateInfo->image, vuid, "%s", ss.str().c_str());
}
}
// Validate correct image aspect bits for desired formats and format consistency
skip |= ValidateImageAspectMask(image_state->image, image_format, aspect_mask, "vkCreateImageView()");
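        // View/image type compatibility (per the image view compatibility table in the spec): 1D images allow
        // 1D / 1D_ARRAY views; 2D images allow 2D / 2D_ARRAY views (plus CUBE / CUBE_ARRAY when cube-compatible);
        // 3D images allow 3D views (plus 2D / 2D_ARRAY under VK_KHR_maintenance1, with the restrictions checked below).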
switch (image_type) {
case VK_IMAGE_TYPE_1D:
if (view_type != VK_IMAGE_VIEW_TYPE_1D && view_type != VK_IMAGE_VIEW_TYPE_1D_ARRAY) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
break;
case VK_IMAGE_TYPE_2D:
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
if ((view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) &&
!(image_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01003",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if (view_type != VK_IMAGE_VIEW_TYPE_CUBE && view_type != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
break;
case VK_IMAGE_TYPE_3D:
if (device_extensions.vk_khr_maintenance1) {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
if ((view_type == VK_IMAGE_VIEW_TYPE_2D || view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
if (!(image_flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) {
skip |=
LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01005",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if ((image_flags & (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
VK_IMAGE_CREATE_SPARSE_ALIASED_BIT))) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s "
"when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
} else {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
} else {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
break;
default:
break;
}
// External format checks needed when VK_ANDROID_external_memory_android_hardware_buffer enabled
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageViewANDROID(pCreateInfo);
}
skip |= ValidateImageViewFormatFeatures(image_state, view_format, image_usage);
if (image_usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) {
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02086",
"vkCreateImageView() If image was created with usage containing "
"VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, viewType must be "
"VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
}
if (view_format != VK_FORMAT_R8_UINT) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02087",
"vkCreateImageView() If image was created with usage containing "
"VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, format must be VK_FORMAT_R8_UINT.");
}
}
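        // With VK_REMAINING_ARRAY_LAYERS the effective layer count is arrayLayers - baseArrayLayer: a cube view
        // needs exactly 6 remaining layers, a cube-array view a multiple of 6.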
if (pCreateInfo->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) {
if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE &&
image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer != 6) {
                skip |= LogError(device, "VUID-VkImageViewCreateInfo-viewType-02962",
                                 "vkCreateImageView(): subresourceRange.layerCount is VK_REMAINING_ARRAY_LAYERS, but the "
                                 "remaining layer count (%d) must be 6 for a VK_IMAGE_VIEW_TYPE_CUBE view.",
                                 image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer);
}
if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY &&
((image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer) % 6) != 0) {
                skip |= LogError(
                    device, "VUID-VkImageViewCreateInfo-viewType-02963",
                    "vkCreateImageView(): subresourceRange.layerCount is VK_REMAINING_ARRAY_LAYERS, but the remaining layer "
                    "count (%d) must be a multiple of 6 for a VK_IMAGE_VIEW_TYPE_CUBE_ARRAY view.",
                    image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer);
}
}
if (image_usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) {
if (pCreateInfo->subresourceRange.levelCount != 1) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02571",
"vkCreateImageView(): If image was created with usage containing "
"VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, subresourceRange.levelCount (%d) must: be 1",
pCreateInfo->subresourceRange.levelCount);
}
}
if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT) {
if (!enabled_features.fragment_density_map_features.fragmentDensityMapDynamic) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-02572",
"vkCreateImageView(): If the fragmentDensityMapDynamic feature is not enabled, "
"flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT");
}
} else {
if (image_usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) {
if (image_flags & (VK_IMAGE_CREATE_PROTECTED_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT |
VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT)) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-04116",
"vkCreateImageView(): If image was created with usage containing "
"VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT flags must not contain any of "
"VK_IMAGE_CREATE_PROTECTED_BIT, VK_IMAGE_CREATE_SPARSE_BINDING_BIT, "
"VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or VK_IMAGE_CREATE_SPARSE_ALIASED_BIT");
}
}
}
if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT) {
if (!enabled_features.fragment_density_map2_features.fragmentDensityMapDeferred) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-03567",
"vkCreateImageView(): If the fragmentDensityMapDeferred feature is not enabled, "
"flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT");
}
if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT) {
skip |=
LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-03568",
"vkCreateImageView(): If flags contains VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT, "
"flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT");
}
}
if (device_extensions.vk_ext_fragment_density_map_2) {
if ((image_flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) && (image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) &&
(pCreateInfo->subresourceRange.layerCount >
phys_dev_ext_props.fragment_density_map2_props.maxSubsampledArrayLayers)) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-03569",
"vkCreateImageView(): If image was created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT and usage containing VK_IMAGE_USAGE_SAMPLED_BIT "
"subresourceRange.layerCount (%d) must: be less than or equal to maxSubsampledArrayLayers (%d)",
pCreateInfo->subresourceRange.layerCount,
phys_dev_ext_props.fragment_density_map2_props.maxSubsampledArrayLayers);
}
}
auto astc_decode_mode = lvl_find_in_chain<VkImageViewASTCDecodeModeEXT>(pCreateInfo->pNext);
if ((device_extensions.vk_ext_astc_decode_mode) && (astc_decode_mode != nullptr)) {
if ((enabled_features.astc_decode_features.decodeModeSharedExponent == VK_FALSE) &&
(astc_decode_mode->decodeMode == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)) {
skip |= LogError(device, "VUID-VkImageViewASTCDecodeModeEXT-decodeMode-02231",
"vkCreateImageView(): decodeModeSharedExponent is not enabled but "
"VkImageViewASTCDecodeModeEXT::decodeMode is VK_FORMAT_E5B9G9R9_UFLOAT_PACK32.");
}
}
}
return skip;
}
bool CoreChecks::ValidateCmdCopyBufferBounds(const BUFFER_STATE *src_buffer_state, const BUFFER_STATE *dst_buffer_state,
uint32_t regionCount, const VkBufferCopy *pRegions) const {
bool skip = false;
VkDeviceSize src_buffer_size = src_buffer_state->createInfo.size;
VkDeviceSize dst_buffer_size = dst_buffer_state->createInfo.size;
VkDeviceSize src_min = UINT64_MAX;
VkDeviceSize src_max = 0;
VkDeviceSize dst_min = UINT64_MAX;
VkDeviceSize dst_max = 0;
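    // Track the bounding interval of all source and all destination regions for the overlap test after the loop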
for (uint32_t i = 0; i < regionCount; i++) {
src_min = std::min(src_min, pRegions[i].srcOffset);
src_max = std::max(src_max, (pRegions[i].srcOffset + pRegions[i].size));
dst_min = std::min(dst_min, pRegions[i].dstOffset);
dst_max = std::max(dst_max, (pRegions[i].dstOffset + pRegions[i].size));
        // The srcOffset member of each element of pRegions must be less than the size of srcBuffer
        if (pRegions[i].srcOffset >= src_buffer_size) {
            skip |= LogError(src_buffer_state->buffer, "VUID-vkCmdCopyBuffer-srcOffset-00113",
                             "vkCmdCopyBuffer(): pRegions[%d].srcOffset (%" PRIuLEAST64
                             ") is not less than the size of srcBuffer (%" PRIuLEAST64 ").",
                             i, pRegions[i].srcOffset, src_buffer_size);
        }
        // The dstOffset member of each element of pRegions must be less than the size of dstBuffer
        if (pRegions[i].dstOffset >= dst_buffer_size) {
            skip |= LogError(dst_buffer_state->buffer, "VUID-vkCmdCopyBuffer-dstOffset-00114",
                             "vkCmdCopyBuffer(): pRegions[%d].dstOffset (%" PRIuLEAST64
                             ") is not less than the size of dstBuffer (%" PRIuLEAST64 ").",
                             i, pRegions[i].dstOffset, dst_buffer_size);
        }
// The size member of each element of pRegions must be less than or equal to the size of srcBuffer minus srcOffset
if (pRegions[i].size > (src_buffer_size - pRegions[i].srcOffset)) {
skip |= LogError(src_buffer_state->buffer, "VUID-vkCmdCopyBuffer-size-00115",
"vkCmdCopyBuffer(): pRegions[%d].size (%" PRIuLEAST64
") is greater than the source buffer size (%" PRIuLEAST64
") minus pRegions[%d].srcOffset (%" PRIuLEAST64 ").",
i, pRegions[i].size, src_buffer_size, i, pRegions[i].srcOffset);
}
// The size member of each element of pRegions must be less than or equal to the size of dstBuffer minus dstOffset
if (pRegions[i].size > (dst_buffer_size - pRegions[i].dstOffset)) {
skip |= LogError(dst_buffer_state->buffer, "VUID-vkCmdCopyBuffer-size-00116",
"vkCmdCopyBuffer(): pRegions[%d].size (%" PRIuLEAST64
") is greater than the destination buffer size (%" PRIuLEAST64
") minus pRegions[%d].dstOffset (%" PRIuLEAST64 ").",
i, pRegions[i].size, dst_buffer_size, i, pRegions[i].dstOffset);
}
}
    // The union of the source regions, and the union of the destination regions, must not overlap in memory
    if (src_buffer_state->buffer == dst_buffer_state->buffer) {
        // Conservative bounding-interval test: two intervals [min, max) overlap iff each one starts before the other ends
        if ((src_min < dst_max) && (dst_min < src_max)) {
            skip |= LogError(src_buffer_state->buffer, "VUID-vkCmdCopyBuffer-pRegions-00117",
                             "vkCmdCopyBuffer(): Detected overlap between source and dest regions in memory.");
        }
    }
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferCopy *pRegions) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(srcBuffer);
const auto dst_buffer_state = GetBufferState(dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-srcBuffer-00119");
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-dstBuffer-00121");
// Validate that SRC & DST buffers have correct usage flags set
skip |=
ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyBuffer-srcBuffer-00118",
"vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |=
ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyBuffer-dstBuffer-00120",
"vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |=
ValidateCmdQueueFlags(cb_node, "vkCmdCopyBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
skip |= InsideRenderPass(cb_node, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-renderpass");
skip |= ValidateCmdCopyBufferBounds(src_buffer_state, dst_buffer_state, regionCount, pRegions);
skip |= ValidateProtectedBuffer(cb_node, src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01822");
skip |= ValidateProtectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01823");
skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01824");
return skip;
}
bool CoreChecks::ValidateIdleBuffer(VkBuffer buffer) const {
bool skip = false;
auto buffer_state = GetBufferState(buffer);
if (buffer_state) {
if (buffer_state->in_use.load()) {
skip |= LogError(buffer, "VUID-vkDestroyBuffer-buffer-00922", "Cannot free %s that is in use by a command buffer.",
report_data->FormatHandle(buffer).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView,
const VkAllocationCallbacks *pAllocator) const {
const IMAGE_VIEW_STATE *image_view_state = GetImageViewState(imageView);
const VulkanTypedHandle obj_struct(imageView, kVulkanObjectTypeImageView);
bool skip = false;
if (image_view_state) {
skip |=
ValidateObjectNotInUse(image_view_state, obj_struct, "vkDestroyImageView", "VUID-vkDestroyImageView-imageView-01026");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) const {
auto buffer_state = GetBufferState(buffer);
bool skip = false;
if (buffer_state) {
skip |= ValidateIdleBuffer(buffer);
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator) const {
auto buffer_view_state = GetBufferViewState(bufferView);
const VulkanTypedHandle obj_struct(bufferView, kVulkanObjectTypeBufferView);
bool skip = false;
if (buffer_view_state) {
skip |= ValidateObjectNotInUse(buffer_view_state, obj_struct, "vkDestroyBufferView",
"VUID-vkDestroyBufferView-bufferView-00936");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize size, uint32_t data) const {
auto cb_node = GetCBState(commandBuffer);
auto buffer_state = GetBufferState(dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-dstBuffer-00031");
skip |=
ValidateCmdQueueFlags(cb_node, "vkCmdFillBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdFillBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdFillBuffer-dstBuffer-00029",
"vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= InsideRenderPass(cb_node, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-renderpass");
skip |= ValidateProtectedBuffer(cb_node, buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-commandBuffer-01811");
skip |= ValidateUnprotectedBuffer(cb_node, buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-commandBuffer-01812");
if (dstOffset >= buffer_state->createInfo.size) {
skip |= LogError(dstBuffer, "VUID-vkCmdFillBuffer-dstOffset-00024",
"vkCmdFillBuffer(): dstOffset (0x%" PRIxLEAST64
") is not less than destination buffer (%s) size (0x%" PRIxLEAST64 ").",
dstOffset, report_data->FormatHandle(dstBuffer).c_str(), buffer_state->createInfo.size);
}
if ((size != VK_WHOLE_SIZE) && (size > (buffer_state->createInfo.size - dstOffset))) {
skip |= LogError(dstBuffer, "VUID-vkCmdFillBuffer-size-00027",
"vkCmdFillBuffer(): size (0x%" PRIxLEAST64 ") is greater than dstBuffer (%s) size (0x%" PRIxLEAST64
") minus dstOffset (0x%" PRIxLEAST64 ").",
size, report_data->FormatHandle(dstBuffer).c_str(), buffer_state->createInfo.size, dstOffset);
}
return skip;
}
bool CoreChecks::ValidateBufferImageCopyData(const CMD_BUFFER_STATE *cb_node, uint32_t regionCount,
const VkBufferImageCopy *pRegions, const IMAGE_STATE *image_state,
const char *function) const {
bool skip = false;
assert(image_state != nullptr);
const VkFormat image_format = image_state->createInfo.format;
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageAspectFlags region_aspect_mask = pRegions[i].imageSubresource.aspectMask;
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((pRegions[i].imageOffset.y != 0) || (pRegions[i].imageExtent.height != 1)) {
skip |=
LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-srcImage-00199",
"%s(): pRegion[%d] imageOffset.y is %d and imageExtent.height is %d. For 1D images these must be 0 "
"and 1, respectively.",
function, i, pRegions[i].imageOffset.y, pRegions[i].imageExtent.height);
}
}
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) || (image_state->createInfo.imageType == VK_IMAGE_TYPE_2D)) {
if ((pRegions[i].imageOffset.z != 0) || (pRegions[i].imageExtent.depth != 1)) {
skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-srcImage-00201",
"%s(): pRegion[%d] imageOffset.z is %d and imageExtent.depth is %d. For 1D and 2D images these "
"must be 0 and 1, respectively.",
function, i, pRegions[i].imageOffset.z, pRegions[i].imageExtent.depth);
}
}
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != pRegions[i].imageSubresource.baseArrayLayer) || (1 != pRegions[i].imageSubresource.layerCount)) {
skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-baseArrayLayer-00213",
"%s(): pRegion[%d] imageSubresource.baseArrayLayer is %d and imageSubresource.layerCount is %d. "
"For 3D images these must be 0 and 1, respectively.",
function, i, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount);
}
}
        // If the calling command's VkImage parameter's format is not a depth/stencil format,
// then bufferOffset must be a multiple of the calling command's VkImage parameter's element size
uint32_t element_size = FormatElementSize(image_format, region_aspect_mask);
// If not depth/stencil and not multi-plane
if ((!FormatIsDepthAndStencil(image_format) && !FormatIsMultiplane(image_format)) &&
SafeModulo(pRegions[i].bufferOffset, element_size) != 0) {
const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion)
? "VUID-vkCmdCopyBufferToImage-bufferOffset-01558"
: "VUID-vkCmdCopyBufferToImage-bufferOffset-00193";
skip |= LogError(image_state->image, vuid,
"%s(): pRegion[%d] bufferOffset 0x%" PRIxLEAST64
" must be a multiple of this format's texel size (%" PRIu32 ").",
function, i, pRegions[i].bufferOffset, element_size);
}
// BufferRowLength must be 0, or greater than or equal to the width member of imageExtent
if ((pRegions[i].bufferRowLength != 0) && (pRegions[i].bufferRowLength < pRegions[i].imageExtent.width)) {
skip |=
LogError(image_state->image, "VUID-VkBufferImageCopy-bufferRowLength-00195",
"%s(): pRegion[%d] bufferRowLength (%d) must be zero or greater-than-or-equal-to imageExtent.width (%d).",
function, i, pRegions[i].bufferRowLength, pRegions[i].imageExtent.width);
}
// BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent
if ((pRegions[i].bufferImageHeight != 0) && (pRegions[i].bufferImageHeight < pRegions[i].imageExtent.height)) {
skip |= LogError(
image_state->image, "VUID-VkBufferImageCopy-bufferImageHeight-00196",
"%s(): pRegion[%d] bufferImageHeight (%d) must be zero or greater-than-or-equal-to imageExtent.height (%d).",
function, i, pRegions[i].bufferImageHeight, pRegions[i].imageExtent.height);
}
// Calculate adjusted image extent, accounting for multiplane image factors
VkExtent3D adjusted_image_extent = GetImageSubresourceExtent(image_state, &pRegions[i].imageSubresource);
// imageOffset.x and (imageExtent.width + imageOffset.x) must both be >= 0 and <= image subresource width
if ((pRegions[i].imageOffset.x < 0) || (pRegions[i].imageOffset.x > static_cast<int32_t>(adjusted_image_extent.width)) ||
((pRegions[i].imageOffset.x + static_cast<int32_t>(pRegions[i].imageExtent.width)) >
static_cast<int32_t>(adjusted_image_extent.width))) {
skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageOffset-00197",
"%s(): Both pRegion[%d] imageoffset.x (%d) and (imageExtent.width + imageOffset.x) (%d) must be >= "
"zero or <= image subresource width (%d).",
function, i, pRegions[i].imageOffset.x, (pRegions[i].imageOffset.x + pRegions[i].imageExtent.width),
adjusted_image_extent.width);
}
// imageOffset.y and (imageExtent.height + imageOffset.y) must both be >= 0 and <= image subresource height
if ((pRegions[i].imageOffset.y < 0) || (pRegions[i].imageOffset.y > static_cast<int32_t>(adjusted_image_extent.height)) ||
((pRegions[i].imageOffset.y + static_cast<int32_t>(pRegions[i].imageExtent.height)) >
static_cast<int32_t>(adjusted_image_extent.height))) {
skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageOffset-00198",
"%s(): Both pRegion[%d] imageoffset.y (%d) and (imageExtent.height + imageOffset.y) (%d) must be >= "
"zero or <= image subresource height (%d).",
function, i, pRegions[i].imageOffset.y, (pRegions[i].imageOffset.y + pRegions[i].imageExtent.height),
adjusted_image_extent.height);
}
// imageOffset.z and (imageExtent.depth + imageOffset.z) must both be >= 0 and <= image subresource depth
if ((pRegions[i].imageOffset.z < 0) || (pRegions[i].imageOffset.z > static_cast<int32_t>(adjusted_image_extent.depth)) ||
((pRegions[i].imageOffset.z + static_cast<int32_t>(pRegions[i].imageExtent.depth)) >
static_cast<int32_t>(adjusted_image_extent.depth))) {
skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageOffset-00200",
"%s(): Both pRegion[%d] imageoffset.z (%d) and (imageExtent.depth + imageOffset.z) (%d) must be >= "
"zero or <= image subresource depth (%d).",
function, i, pRegions[i].imageOffset.z, (pRegions[i].imageOffset.z + pRegions[i].imageExtent.depth),
adjusted_image_extent.depth);
}
// subresource aspectMask must have exactly 1 bit set
const int num_bits = sizeof(VkFlags) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(region_aspect_mask);
if (aspect_mask_bits.count() != 1) {
skip |= LogError(image_state->image, "VUID-VkBufferImageCopy-aspectMask-00212",
"%s(): aspectMasks for imageSubresource in pRegion[%d] must have only a single bit set.", function, i);
}
// image subresource aspect bit must match format
if (!VerifyAspectsPresent(region_aspect_mask, image_format)) {
skip |= LogError(
image_state->image, "VUID-vkCmdCopyBufferToImage-aspectMask-00211",
"%s(): pRegion[%d] subresource aspectMask 0x%x specifies aspects that are not present in image format 0x%x.",
function, i, region_aspect_mask, image_format);
}
// Checks that apply only to compressed images
if (FormatIsCompressed(image_format) || FormatIsSinglePlane_422(image_format)) {
auto block_size = FormatTexelBlockExtent(image_format);
// BufferRowLength must be a multiple of block width
            if (SafeModulo(pRegions[i].bufferRowLength, block_size.width) != 0) {
                skip |= LogError(
                    image_state->image, "VUID-vkCmdCopyBufferToImage-bufferRowLength-00203",
                    "%s(): pRegion[%d] bufferRowLength (%d) must be a multiple of the compressed image's texel width (%d).",
                    function, i, pRegions[i].bufferRowLength, block_size.width);
            }
            // BufferImageHeight must be a multiple of block height
            if (SafeModulo(pRegions[i].bufferImageHeight, block_size.height) != 0) {
                skip |= LogError(
                    image_state->image, "VUID-vkCmdCopyBufferToImage-bufferImageHeight-00204",
                    "%s(): pRegion[%d] bufferImageHeight (%d) must be a multiple of the compressed image's texel height (%d).",
                    function, i, pRegions[i].bufferImageHeight, block_size.height);
            }
            // image offsets must be multiples of block dimensions
            if ((SafeModulo(pRegions[i].imageOffset.x, block_size.width) != 0) ||
                (SafeModulo(pRegions[i].imageOffset.y, block_size.height) != 0) ||
                (SafeModulo(pRegions[i].imageOffset.z, block_size.depth) != 0)) {
                skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageOffset-00205",
                                 "%s(): pRegion[%d] imageOffset (x,y,z) = (%d, %d, %d) must be multiples of the compressed "
                                 "image's texel block extent (%d, %d, %d).",
                                 function, i, pRegions[i].imageOffset.x, pRegions[i].imageOffset.y, pRegions[i].imageOffset.z,
                                 block_size.width, block_size.height, block_size.depth);
            }
            // bufferOffset must be a multiple of block size (linear bytes)
            uint32_t block_size_in_bytes = FormatElementSize(image_format);
            if (SafeModulo(pRegions[i].bufferOffset, block_size_in_bytes) != 0) {
                skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-bufferOffset-00206",
                                 "%s(): pRegion[%d] bufferOffset (0x%" PRIxLEAST64
                                 ") must be a multiple of the compressed image's texel block size (%" PRIu32 ").",
                                 function, i, pRegions[i].bufferOffset, block_size_in_bytes);
            }
// imageExtent width must be a multiple of block width, or extent+offset width must equal subresource width
VkExtent3D mip_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource));
            if ((SafeModulo(pRegions[i].imageExtent.width, block_size.width) != 0) &&
                (pRegions[i].imageExtent.width + pRegions[i].imageOffset.x != mip_extent.width)) {
                skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageExtent-00207",
                                 "%s(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block width "
                                 "(%d), or when added to offset.x (%d) must equal the image subresource width (%d).",
                                 function, i, pRegions[i].imageExtent.width, block_size.width, pRegions[i].imageOffset.x,
                                 mip_extent.width);
            }
// imageExtent height must be a multiple of block height, or extent+offset height must equal subresource height
            if ((SafeModulo(pRegions[i].imageExtent.height, block_size.height) != 0) &&
                (pRegions[i].imageExtent.height + pRegions[i].imageOffset.y != mip_extent.height)) {
                skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageExtent-00208",
                                 "%s(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block height "
                                 "(%d), or when added to offset.y (%d) must equal the image subresource height (%d).",
                                 function, i, pRegions[i].imageExtent.height, block_size.height, pRegions[i].imageOffset.y,
                                 mip_extent.height);
            }
// imageExtent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
            if ((SafeModulo(pRegions[i].imageExtent.depth, block_size.depth) != 0) &&
                (pRegions[i].imageExtent.depth + pRegions[i].imageOffset.z != mip_extent.depth)) {
                skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-imageExtent-00209",
                                 "%s(): pRegion[%d] extent depth (%d) must be a multiple of the compressed texture block depth "
                                 "(%d), or when added to offset.z (%d) must equal the image subresource depth (%d).",
                                 function, i, pRegions[i].imageExtent.depth, block_size.depth, pRegions[i].imageOffset.z,
                                 mip_extent.depth);
            }
}
// Checks that apply only to multi-planar format images
if (FormatIsMultiplane(image_format)) {
// VK_IMAGE_ASPECT_PLANE_2_BIT valid only for image formats with three planes
if ((FormatPlaneCount(image_format) < 3) && (region_aspect_mask == VK_IMAGE_ASPECT_PLANE_2_BIT)) {
skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-aspectMask-01560",
"%s(): pRegion[%d] subresource aspectMask cannot be VK_IMAGE_ASPECT_PLANE_2_BIT unless image "
"format has three planes.",
function, i);
}
// image subresource aspectMask must be VK_IMAGE_ASPECT_PLANE_*_BIT
if (0 ==
(region_aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT))) {
skip |= LogError(image_state->image, "VUID-vkCmdCopyBufferToImage-aspectMask-01560",
"%s(): pRegion[%d] subresource aspectMask for multi-plane image formats must have a "
"VK_IMAGE_ASPECT_PLANE_*_BIT when copying to or from.",
function, i);
} else {
// Know aspect mask is valid
const VkFormat compatible_format = FindMultiplaneCompatibleFormat(image_format, region_aspect_mask);
const uint32_t compatible_size = FormatElementSize(compatible_format);
if (SafeModulo(pRegions[i].bufferOffset, compatible_size) != 0) {
                    skip |= LogError(
                        image_state->image, "VUID-vkCmdCopyBufferToImage-bufferOffset-01559",
                        "%s(): pRegion[%d]->bufferOffset is 0x%" PRIxLEAST64
                        " but must be a multiple of the multi-plane compatible format's texel size (%u) for plane %u (%s).",
                        function, i, pRegions[i].bufferOffset, compatible_size, GetPlaneIndex(region_aspect_mask),
                        string_VkFormat(compatible_format));
}
}
}
// Checks depth or stencil aspect are used in graphics queue
if ((region_aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0) {
assert(cb_node != nullptr);
const COMMAND_POOL_STATE *command_pool = cb_node->command_pool.get();
if (command_pool != nullptr) {
const uint32_t queueFamilyIndex = command_pool->queueFamilyIndex;
const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queueFamilyIndex].queueFlags;
if ((queue_flags & VK_QUEUE_GRAPHICS_BIT) == 0) {
                    LogObjectList objlist(cb_node->commandBuffer);
                    objlist.add(command_pool->commandPool);
                    objlist.add(image_state->image);
                    // TODO - Label when future headers get merged in from internal MR 4077 fix
                    skip |=
                        LogError(objlist, "UNASSIGNED-VkBufferImageCopy-aspectMask",
                                 "%s(): pRegion[%d] subresource aspectMask 0x%x specifies VK_IMAGE_ASPECT_DEPTH_BIT or "
                                 "VK_IMAGE_ASPECT_STENCIL_BIT but the command buffer %s was allocated from the command pool %s "
                                 "which was created with queueFamilyIndex %u, which does not contain VK_QUEUE_GRAPHICS_BIT.",
                                 function, i, region_aspect_mask, report_data->FormatHandle(cb_node->commandBuffer).c_str(),
                                 report_data->FormatHandle(command_pool->commandPool).c_str(), queueFamilyIndex);
}
}
}
}
return skip;
}
bool CoreChecks::ValidateImageBounds(const IMAGE_STATE *image_state, const uint32_t regionCount, const VkBufferImageCopy *pRegions,
const char *func_name, const char *msg_code) const {
bool skip = false;
const VkImageCreateInfo *image_info = &(image_state->createInfo);
for (uint32_t i = 0; i < regionCount; i++) {
VkExtent3D extent = pRegions[i].imageExtent;
VkOffset3D offset = pRegions[i].imageOffset;
if (IsExtentSizeZero(&extent)) // Warn on zero area subresource
{
skip |= LogWarning(image_state->image, kVUID_Core_Image_ZeroAreaSubregion,
"%s: pRegion[%d] imageExtent of {%1d, %1d, %1d} has zero area", func_name, i, extent.width,
extent.height, extent.depth);
}
VkExtent3D image_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource));
// If we're using a compressed format, valid extent is rounded up to multiple of block size (per 18.1)
if (FormatIsCompressed(image_info->format) || FormatIsSinglePlane_422(image_state->createInfo.format)) {
auto block_extent = FormatTexelBlockExtent(image_info->format);
if (image_extent.width % block_extent.width) {
image_extent.width += (block_extent.width - (image_extent.width % block_extent.width));
}
if (image_extent.height % block_extent.height) {
image_extent.height += (block_extent.height - (image_extent.height % block_extent.height));
}
if (image_extent.depth % block_extent.depth) {
image_extent.depth += (block_extent.depth - (image_extent.depth % block_extent.depth));
}
}
if (0 != ExceedsBounds(&offset, &extent, &image_extent)) {
            skip |= LogError(image_state->image, msg_code, "%s: pRegion[%d] exceeds image bounds.", func_name, i);
}
}
return skip;
}
bool CoreChecks::ValidateBufferBounds(const IMAGE_STATE *image_state, const BUFFER_STATE *buff_state, uint32_t regionCount,
const VkBufferImageCopy *pRegions, const char *func_name, const char *msg_code) const {
bool skip = false;
VkDeviceSize buffer_size = buff_state->createInfo.size;
for (uint32_t i = 0; i < regionCount; i++) {
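        // GetBufferSizeFromCopyImage() is assumed to return the byte span the region needs in the buffer (honoring
        // bufferRowLength / bufferImageHeight); adding bufferOffset yields the end of the span that must fit.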
VkDeviceSize max_buffer_offset =
GetBufferSizeFromCopyImage(pRegions[i], image_state->createInfo.format) + pRegions[i].bufferOffset;
if (buffer_size < max_buffer_offset) {
skip |=
                LogError(device, msg_code, "%s: pRegion[%d] exceeds buffer size of %" PRIu64 " bytes.", func_name, i, buffer_size);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount,
const VkBufferImageCopy *pRegions) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_image_state = GetImageState(srcImage);
const auto dst_buffer_state = GetBufferState(dstBuffer);
bool skip = ValidateBufferImageCopyData(cb_node, regionCount, pRegions, src_image_state, "vkCmdCopyImageToBuffer");
// Validate command buffer state
skip |= ValidateCmd(cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
// Command pool must support graphics, compute, or transfer operations
const auto pPool = cb_node->command_pool.get();
VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
skip |=
LogError(cb_node->createInfo.commandPool, "VUID-vkCmdCopyImageToBuffer-commandBuffer-cmdpool",
"Cannot call vkCmdCopyImageToBuffer() on a command buffer allocated from a pool without graphics, compute, "
"or transfer capabilities..");
}
skip |= ValidateImageBounds(src_image_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-pRegions-00182");
skip |= ValidateBufferBounds(src_image_state, dst_buffer_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-pRegions-00183");
skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage",
"VUID-vkCmdCopyImageToBuffer-srcImage-00188");
skip |= ValidateMemoryIsBoundToImage(src_image_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-srcImage-00187");
skip |=
ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-dstBuffer-00192");
// Validate that SRC image & DST buffer have correct usage flags set
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
"VUID-vkCmdCopyImageToBuffer-srcImage-00186", "vkCmdCopyImageToBuffer()",
"VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyImageToBuffer-dstBuffer-00191", "vkCmdCopyImageToBuffer()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateProtectedImage(cb_node, src_image_state, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-commandBuffer-01831");
skip |= ValidateProtectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-commandBuffer-01832");
skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-commandBuffer-01833");
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
        skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdCopyImageToBuffer-srcImage-02544",
                         "vkCmdCopyImageToBuffer(): srcImage must not have been created with flags containing "
                         "VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
}
if (device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-srcImage-01998");
}
skip |= InsideRenderPass(cb_node, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-renderpass");
bool hit_error = false;
const char *src_invalid_layout_vuid = (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImageToBuffer-srcImageLayout-01397"
: "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00190";
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, "vkCmdCopyImageToBuffer()",
"imageSubresource", i);
skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].imageSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImageToBuffer()", src_invalid_layout_vuid,
"VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189", &hit_error);
skip |= ValidateCopyBufferImageTransferGranularityRequirements(
cb_node, src_image_state, &pRegions[i], i, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");
skip |=
ValidateImageMipLevel(cb_node, src_image_state, pRegions[i].imageSubresource.mipLevel, i, "vkCmdCopyImageToBuffer()",
"imageSubresource", "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703");
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, "vkCmdCopyImageToBuffer()",
"imageSubresource", "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704");
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
    // Make sure that all image slices record the referenced layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].imageSubresource, srcImageLayout);
}
}
bool CoreChecks::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(srcBuffer);
const auto dst_image_state = GetImageState(dstImage);
bool skip = ValidateBufferImageCopyData(cb_node, regionCount, pRegions, dst_image_state, "vkCmdCopyBufferToImage");
// Validate command buffer state
skip |= ValidateCmd(cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
// Command pool must support graphics, compute, or transfer operations
const auto pPool = cb_node->command_pool.get();
VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
skip |=
LogError(cb_node->createInfo.commandPool, "VUID-vkCmdCopyBufferToImage-commandBuffer-cmdpool",
"Cannot call vkCmdCopyBufferToImage() on a command buffer allocated from a pool without graphics, compute, "
"or transfer capabilities..");
}
skip |= ValidateImageBounds(dst_image_state, regionCount, pRegions, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-pRegions-00172");
skip |= ValidateBufferBounds(dst_image_state, src_buffer_state, regionCount, pRegions, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-pRegions-00171");
skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage",
"VUID-vkCmdCopyBufferToImage-dstImage-00179");
skip |=
ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-srcBuffer-00176");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-dstImage-00178");
skip |= ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
"VUID-vkCmdCopyBufferToImage-srcBuffer-00174", "vkCmdCopyBufferToImage()",
"VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyBufferToImage-dstImage-00177", "vkCmdCopyBufferToImage()",
"VK_IMAGE_USAGE_TRANSFER_DST_BIT");
skip |= ValidateProtectedBuffer(cb_node, src_buffer_state, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-commandBuffer-01828");
skip |= ValidateProtectedImage(cb_node, dst_image_state, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-commandBuffer-01829");
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-commandBuffer-01830");
// Validation for VK_EXT_fragment_density_map
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
        skip |= LogError(cb_node->commandBuffer, "VUID-vkCmdCopyBufferToImage-dstImage-02543",
"vkCmdCopyBufferToImage(): dstImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT");
}
if (device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-dstImage-01997");
}
skip |= InsideRenderPass(cb_node, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-renderpass");
bool hit_error = false;
const char *dst_invalid_layout_vuid = (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyBufferToImage-dstImageLayout-01396"
: "VUID-vkCmdCopyBufferToImage-dstImageLayout-00181";
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, "vkCmdCopyBufferToImage()",
"imageSubresource", i);
skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].imageSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyBufferToImage()", dst_invalid_layout_vuid,
"VUID-vkCmdCopyBufferToImage-dstImageLayout-00180", &hit_error);
skip |= ValidateCopyBufferImageTransferGranularityRequirements(
cb_node, dst_image_state, &pRegions[i], i, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-imageOffset-01793");
skip |=
ValidateImageMipLevel(cb_node, dst_image_state, pRegions[i].imageSubresource.mipLevel, i, "vkCmdCopyBufferToImage()",
"imageSubresource", "VUID-vkCmdCopyBufferToImage-imageSubresource-01701");
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, "vkCmdCopyBufferToImage()",
"imageSubresource", "VUID-vkCmdCopyBufferToImage-imageSubresource-01702");
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
auto cb_node = GetCBState(commandBuffer);
auto dst_image_state = GetImageState(dstImage);
    // Make sure that all image slices record the referenced layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].imageSubresource, dstImageLayout);
}
}
bool CoreChecks::PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
VkSubresourceLayout *pLayout) const {
bool skip = false;
const VkImageAspectFlags sub_aspect = pSubresource->aspectMask;
// The aspectMask member of pSubresource must only have a single bit set
const int num_bits = sizeof(sub_aspect) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(sub_aspect);
if (aspect_mask_bits.count() != 1) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-aspectMask-00997",
"vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must have exactly 1 bit set.");
}
const IMAGE_STATE *image_entry = GetImageState(image);
if (!image_entry) {
return skip;
}
// Image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR
if (device_extensions.vk_ext_image_drm_format_modifier) {
if ((image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) &&
(image_entry->createInfo.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-02270",
"vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR or "
"VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT.");
}
} else {
if (image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-00996",
"vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR.");
}
}
// mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created
if (pSubresource->mipLevel >= image_entry->createInfo.mipLevels) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-mipLevel-01716",
"vkGetImageSubresourceLayout(): pSubresource.mipLevel (%d) must be less than %d.", pSubresource->mipLevel,
image_entry->createInfo.mipLevels);
}
// arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created
if (pSubresource->arrayLayer >= image_entry->createInfo.arrayLayers) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-arrayLayer-01717",
"vkGetImageSubresourceLayout(): pSubresource.arrayLayer (%d) must be less than %d.",
pSubresource->arrayLayer, image_entry->createInfo.arrayLayers);
}
// subresource's aspect must be compatible with image's format.
const VkFormat img_format = image_entry->createInfo.format;
if (image_entry->createInfo.tiling == VK_IMAGE_TILING_LINEAR) {
if (FormatIsMultiplane(img_format)) {
VkImageAspectFlags allowed_flags = (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
const char *vuid = "VUID-vkGetImageSubresourceLayout-format-01581"; // 2-plane version
if (FormatPlaneCount(img_format) > 2u) {
allowed_flags |= VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
vuid = "VUID-vkGetImageSubresourceLayout-format-01582"; // 3-plane version
}
if (sub_aspect != (sub_aspect & allowed_flags)) {
skip |= LogError(image, vuid,
"vkGetImageSubresourceLayout(): For multi-planar images, VkImageSubresource.aspectMask (0x%" PRIx32
") must be a single-plane specifier flag.",
sub_aspect);
}
    } else if (FormatIsColor(img_format)) {
        if (sub_aspect != VK_IMAGE_ASPECT_COLOR_BIT) {
            skip |= LogError(image, kVUID_Core_DrawState_InvalidImageAspect,
                             "vkGetImageSubresourceLayout(): For color formats, VkImageSubresource.aspectMask must be "
                             "VK_IMAGE_ASPECT_COLOR_BIT.");
        }
    } else if (FormatIsDepthOrStencil(img_format)) {
        if ((sub_aspect != VK_IMAGE_ASPECT_DEPTH_BIT) && (sub_aspect != VK_IMAGE_ASPECT_STENCIL_BIT)) {
            skip |= LogError(image, kVUID_Core_DrawState_InvalidImageAspect,
                             "vkGetImageSubresourceLayout(): For depth/stencil formats, VkImageSubresource.aspectMask must be "
                             "either VK_IMAGE_ASPECT_DEPTH_BIT or VK_IMAGE_ASPECT_STENCIL_BIT.");
        }
    }
} else if (image_entry->createInfo.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
if ((sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT) && (sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT) &&
(sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT) && (sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
// TODO: This VU also needs to ensure that the DRM index is in range and valid.
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-tiling-02271",
"vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must be "
"VK_IMAGE_ASPECT_MEMORY_PLANE_i_BIT_EXT.");
}
}
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageSubresourceLayoutANDROID(image);
}
return skip;
}
// Validates the image is allowed to be protected
bool CoreChecks::ValidateProtectedImage(const CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state, const char *cmd_name,
const char *vuid) const {
bool skip = false;
if ((cb_state->unprotected == true) && (image_state->unprotected == false)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(image_state->image);
skip |= LogError(objlist, vuid, "%s: command buffer %s is unprotected while image %s is a protected image", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(image_state->image).c_str());
}
return skip;
}
// Validates the image is allowed to be unprotected
bool CoreChecks::ValidateUnprotectedImage(const CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state, const char *cmd_name,
const char *vuid) const {
bool skip = false;
if ((cb_state->unprotected == false) && (image_state->unprotected == true)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(image_state->image);
skip |= LogError(objlist, vuid, "%s: command buffer %s is protected while image %s is an unprotected image", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(image_state->image).c_str());
}
return skip;
}
// Validates the buffer is allowed to be protected
bool CoreChecks::ValidateProtectedBuffer(const CMD_BUFFER_STATE *cb_state, const BUFFER_STATE *buffer_state, const char *cmd_name,
const char *vuid) const {
bool skip = false;
if ((cb_state->unprotected == true) && (buffer_state->unprotected == false)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(buffer_state->buffer);
skip |= LogError(objlist, vuid, "%s: command buffer %s is unprotected while buffer %s is a protected buffer", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(buffer_state->buffer).c_str());
}
return skip;
}
// Validates the buffer is allowed to be unprotected
bool CoreChecks::ValidateUnprotectedBuffer(const CMD_BUFFER_STATE *cb_state, const BUFFER_STATE *buffer_state, const char *cmd_name,
const char *vuid) const {
bool skip = false;
if ((cb_state->unprotected == false) && (buffer_state->unprotected == true)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(buffer_state->buffer);
skip |= LogError(objlist, vuid, "%s: command buffer %s is protected while buffer %s is an unprotected buffer", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(buffer_state->buffer).c_str());
}
return skip;
}
| 1 | 14,168 | I guess nothing is wrong with this approach, but more curious if you didn't just go `|| (format != VK_FORMAT_UNDEFINED)) {` As if there ever was another external format system added in Vulkan it would need to be manually added here | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -22,7 +22,8 @@
package com.github.javaparser.resolution.types;
import java.util.*;
-import java.util.stream.Collectors;
+
+import static java.util.stream.Collectors.joining;
/**
 * A union type is defined in Java as a list of types separated by pipes. | 1 | /*
* Copyright (C) 2007-2010 Júlio Vilmar Gesser.
* Copyright (C) 2011, 2013-2016 The JavaParser Team.
*
* This file is part of JavaParser.
*
* JavaParser can be used either under the terms of
* a) the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* b) the terms of the Apache License
*
* You should have received a copy of both licenses in LICENCE.LGPL and
* LICENCE.APACHE. Please refer to those files for details.
*
* JavaParser is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
package com.github.javaparser.resolution.types;
import java.util.*;
import java.util.stream.Collectors;
/**
 * A union type is defined in Java as a list of types separated by pipes.
*
* @author Federico Tomassetti
*/
public class ResolvedUnionType implements ResolvedType {
private List<ResolvedType> elements;
public ResolvedUnionType(List<ResolvedType> elements) {
if (elements.size() < 2) {
throw new IllegalArgumentException("An union type should have at least two elements. This has " + elements.size());
}
this.elements = new LinkedList<>(elements);
}
public Optional<ResolvedReferenceType> getCommonAncestor() {
Optional<List<ResolvedReferenceType>> reduce = elements.stream()
.map(ResolvedType::asReferenceType)
.map(ResolvedReferenceType::getAllAncestors)
.reduce((a, b) -> {
ArrayList<ResolvedReferenceType> common = new ArrayList<>(a);
common.retainAll(b);
return common;
});
return reduce.orElse(new ArrayList<>()).stream().findFirst();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ResolvedUnionType that = (ResolvedUnionType) o;
return new HashSet<>(elements).equals(new HashSet<>(that.elements));
}
@Override
public int hashCode() {
return new HashSet<>(elements).hashCode();
}
@Override
public String describe() {
return String.join(" | ", elements.stream().map(ResolvedType::describe).collect(Collectors.toList()));
}
@Override
public boolean isAssignableBy(ResolvedType other) {
return elements.stream().allMatch(e -> e.isAssignableBy(other));
}
@Override
public boolean isUnionType() {
return true;
}
@Override
public ResolvedUnionType asUnionType() {
return this;
}
}
| 1 | 13,388 | Is order really irrelevant here? | javaparser-javaparser | java |
@@ -15,11 +15,14 @@ import android.net.Uri;
import android.provider.MediaStore;
import android.speech.RecognizerIntent;
import android.support.annotation.NonNull;
+import android.support.design.widget.Snackbar;
import android.util.Base64;
import android.view.View;
import android.webkit.MimeTypeMap;
+import android.widget.Toast;
import org.fossasia.phimpme.R;
+import org.fossasia.phimpme.accounts.AccountActivity;
import org.fossasia.phimpme.data.local.AccountDatabase;
import java.io.ByteArrayOutputStream; | 1 | package org.fossasia.phimpme.utilities;
import android.app.Activity;
import android.content.ActivityNotFoundException;
import android.content.ClipData;
import android.content.ClipboardManager;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.net.Uri;
import android.provider.MediaStore;
import android.speech.RecognizerIntent;
import android.support.annotation.NonNull;
import android.util.Base64;
import android.view.View;
import android.webkit.MimeTypeMap;
import org.fossasia.phimpme.R;
import org.fossasia.phimpme.data.local.AccountDatabase;
import java.io.ByteArrayOutputStream;
import java.util.ArrayList;
import java.util.Locale;
import io.realm.Realm;
import io.realm.RealmQuery;
import io.realm.RealmResults;
import static android.content.Context.CLIPBOARD_SERVICE;
import static org.fossasia.phimpme.utilities.Constants.PACKAGE_INSTAGRAM;
import static org.fossasia.phimpme.utilities.Constants.PACKAGE_MESSENGER;
import static org.fossasia.phimpme.utilities.Constants.PACKAGE_SNAPCHAT;
import static org.fossasia.phimpme.utilities.Constants.PACKAGE_WHATSAPP;
/**
* Created by pa1pal on 23/5/17.
*/
public class Utils {
public static Bitmap getBitmapFromPath(String path) {
BitmapFactory.Options bmOptions = new BitmapFactory.Options();
return BitmapFactory.decodeFile(path, bmOptions);
}
public static void copyToClipBoard(Context context, String msg) {
ClipboardManager myClipboard = (ClipboardManager) context.getSystemService(CLIPBOARD_SERVICE);
ClipData myClip = ClipData.newPlainText("text", msg);
myClipboard.setPrimaryClip(myClip);
}
public static String getStringImage(Bitmap bmp) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
bmp.compress(Bitmap.CompressFormat.JPEG, 100, baos);
byte[] imageBytes = baos.toByteArray();
return Base64.encodeToString(imageBytes, Base64.DEFAULT);
}
public static void shareMsgOnIntent(Context context, String msg) {
Intent sendIntent = new Intent();
sendIntent.setAction(Intent.ACTION_SEND);
sendIntent.putExtra(Intent.EXTRA_TEXT, msg);
sendIntent.setType("text/plain");
context.startActivity(sendIntent);
}
public static boolean isAppInstalled(String packageName, PackageManager pm) {
boolean installed;
try {
pm.getPackageInfo(packageName, PackageManager.GET_ACTIVITIES);
installed = true;
} catch (PackageManager.NameNotFoundException e) {
installed = false;
}
return installed;
}
public static boolean isInternetOn(Context context) {
try {
ConnectivityManager connectivityManager =
(ConnectivityManager) context.getSystemService(Context.CONNECTIVITY_SERVICE);
if (connectivityManager != null) {
NetworkInfo activeNetworkInfo = connectivityManager.getActiveNetworkInfo();
return activeNetworkInfo != null && activeNetworkInfo.isConnected();
}
        } catch (Exception ex) {
ex.printStackTrace();
}
return false;
}
/**
     * This function checks if the selected account already exists.
     *
     * @param s Name of the account from accountList e.g. Twitter
     * @return true if it exists, false otherwise
*/
public static boolean checkAlreadyExist(AccountDatabase.AccountName s) {
Realm realm = Realm.getDefaultInstance();
// Query in the realm database
RealmQuery<AccountDatabase> query = realm.where(AccountDatabase.class);
        // Match rows whose name equals the given account name
query.equalTo("name", s.toString());
RealmResults<AccountDatabase> result1 = query.findAll();
        // The account exists if at least one matching row was found
return (result1.size() > 0);
}
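    // Illustrative only: a minimal sketch of how checkAlreadyExist can guard a
    // sign-in flow before a new account row is created. The method name and any
    // account constant passed in are examples, not part of the existing API.
    public static boolean canAddAccount(AccountDatabase.AccountName name) {
        // Only allow adding the account if no row with this name exists yet.
        return !checkAlreadyExist(name);
    }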
public static ArrayList<AccountDatabase.AccountName> getLoggedInAccountsList(){
ArrayList<AccountDatabase.AccountName> list = new ArrayList<>();
for (AccountDatabase.AccountName account : AccountDatabase.AccountName.values()){
if (checkAlreadyExist(account))
list.add(account);
}
return list;
}
public static ArrayList<AccountDatabase.AccountName> getSharableAccountsList(){
ArrayList<AccountDatabase.AccountName> list = new ArrayList<>();
PackageManager packageManager = (ActivitySwitchHelper.context).getPackageManager();
if (isAppInstalled(PACKAGE_INSTAGRAM,packageManager))
list.add(AccountDatabase.AccountName.INSTAGRAM);
if (isAppInstalled(PACKAGE_WHATSAPP,packageManager))
list.add(AccountDatabase.AccountName.WHATSAPP);
/*if (isAppInstalled(PACKAGE_GOOGLEPLUS,packageManager))
list.add(AccountDatabase.AccountName.GOOGLEPLUS);*/
if (isAppInstalled(PACKAGE_MESSENGER,packageManager))
list.add(AccountDatabase.AccountName.MESSENGER);
if (isAppInstalled(PACKAGE_SNAPCHAT,packageManager))
list.add(AccountDatabase.AccountName.SNAPCHAT);
list.addAll(getLoggedInAccountsList());
if (!list.contains(AccountDatabase.AccountName.IMGUR))
list.add(AccountDatabase.AccountName.IMGUR);
list.add(AccountDatabase.AccountName.OTHERS);
return list;
}
public static Uri getImageUri(Context inContext, String imagePath) {
Bitmap inImage = getBitmapFromPath(imagePath);
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
inImage.compress(Bitmap.CompressFormat.JPEG, 100, bytes);
String path = MediaStore.Images.Media.insertImage(inContext.getContentResolver(), inImage, "Title", null);
return Uri.parse(path);
}
public static String getMimeType(String url) {
String type = null;
String extension = MimeTypeMap.getFileExtensionFromUrl(url);
if (extension != null) {
type = MimeTypeMap.getSingleton().getMimeTypeFromExtension(extension);
}
return type;
}
    public static boolean checkNetwork(Context context, @NonNull View view) {
        if (isInternetOn(context)) {
            return true;
        }
        SnackBarHandler.show(view, context.getString(R.string.not_connected));
        return false;
    }
    public static void promptSpeechInput(Activity activity, int requestCode, View parentView, String promptMsg) {
        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault());
        intent.putExtra(RecognizerIntent.EXTRA_PROMPT, promptMsg);
try {
activity.startActivityForResult(intent, requestCode);
} catch (ActivityNotFoundException a) {
SnackBarHandler.show(parentView,activity.getString(R.string.speech_not_supported));
}
}
}
| 1 | 12,880 | I think this import will be unused now. If it is unused remove it. | fossasia-phimpme-android | java |
@@ -37,6 +37,13 @@ type MutationItem struct {
value []byte
}
+func NewBatch(kv RwKV) StatelessRwTx {
+ return &mutation{
+ db: NewObjectDatabase(kv),
+ puts: btree.New(32),
+ }
+}
+
func (mi *MutationItem) Less(than btree.Item) bool {
i := than.(*MutationItem)
c := strings.Compare(mi.table, i.table) | 1 | package ethdb
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/google/btree"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/log"
"github.com/ledgerwatch/turbo-geth/metrics"
)
var (
dbCommitBigBatchTimer = metrics.NewRegisteredTimer("db/commit/big_batch", nil)
)
type mutation struct {
puts *btree.BTree
mu sync.RWMutex
searchItem MutationItem
size int
db Database
}
type MutationItem struct {
table string
key []byte
value []byte
}
func (mi *MutationItem) Less(than btree.Item) bool {
i := than.(*MutationItem)
c := strings.Compare(mi.table, i.table)
if c != 0 {
return c < 0
}
return bytes.Compare(mi.key, i.key) < 0
}
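// Note that Less orders items by table name first and key second, so an
// Ascend over the tree visits all pending writes for one table contiguously
// and in key order; doCommit below relies on this to reuse a single cursor
// per table and to detect when plain appends are safe.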
func (m *mutation) RwKV() RwKV {
if casted, ok := m.db.(HasRwKV); ok {
return casted.RwKV()
}
return nil
}
func (m *mutation) getMem(table string, key []byte) ([]byte, bool) {
m.mu.RLock()
defer m.mu.RUnlock()
m.searchItem.table = table
m.searchItem.key = key
i := m.puts.Get(&m.searchItem)
if i == nil {
return nil, false
}
return i.(*MutationItem).value, true
}
func (m *mutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) {
v, ok := m.getMem(dbutils.Sequence, []byte(bucket))
if !ok && m.db != nil {
v, err = m.db.Get(dbutils.Sequence, []byte(bucket))
if err != nil && !errors.Is(err, ErrKeyNotFound) {
return 0, err
}
}
var currentV uint64 = 0
if len(v) > 0 {
currentV = binary.BigEndian.Uint64(v)
}
newVBytes := make([]byte, 8)
binary.BigEndian.PutUint64(newVBytes, currentV+amount)
if err = m.Put(dbutils.Sequence, []byte(bucket), newVBytes); err != nil {
return 0, err
}
return currentV, nil
}
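// Illustrative usage sketch (the bucket name is hypothetical): reserve five
// sequence numbers in one call and treat the returned value as the first one.
//
//	firstID, err := m.IncrementSequence("MyBucket", 5)
//	// firstID, firstID+1, ..., firstID+4 now belong to this caller.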
func (m *mutation) ReadSequence(bucket string) (res uint64, err error) {
v, ok := m.getMem(dbutils.Sequence, []byte(bucket))
if !ok && m.db != nil {
v, err = m.db.Get(dbutils.Sequence, []byte(bucket))
if err != nil && !errors.Is(err, ErrKeyNotFound) {
return 0, err
}
}
var currentV uint64 = 0
if len(v) > 0 {
currentV = binary.BigEndian.Uint64(v)
}
return currentV, nil
}
// Can only be called from the worker thread
func (m *mutation) Get(table string, key []byte) ([]byte, error) {
if value, ok := m.getMem(table, key); ok {
if value == nil {
return nil, ErrKeyNotFound
}
return value, nil
}
if m.db != nil {
return m.db.Get(table, key)
}
return nil, ErrKeyNotFound
}
func (m *mutation) Last(table string) ([]byte, []byte, error) {
return m.db.Last(table)
}
func (m *mutation) hasMem(table string, key []byte) bool {
m.mu.RLock()
defer m.mu.RUnlock()
m.searchItem.table = table
m.searchItem.key = key
return m.puts.Has(&m.searchItem)
}
func (m *mutation) Has(table string, key []byte) (bool, error) {
if m.hasMem(table, key) {
return true, nil
}
if m.db != nil {
return m.db.Has(table, key)
}
return false, nil
}
func (m *mutation) Put(table string, key []byte, value []byte) error {
m.mu.Lock()
defer m.mu.Unlock()
newMi := &MutationItem{table: table, key: key, value: value}
i := m.puts.ReplaceOrInsert(newMi)
m.size += int(unsafe.Sizeof(newMi)) + len(key) + len(value)
if i != nil {
oldMi := i.(*MutationItem)
m.size -= (int(unsafe.Sizeof(oldMi)) + len(oldMi.key) + len(oldMi.value))
}
return nil
}
func (m *mutation) Append(table string, key []byte, value []byte) error {
return m.Put(table, key, value)
}
func (m *mutation) AppendDup(table string, key []byte, value []byte) error {
return m.Put(table, key, value)
}
func (m *mutation) MultiPut(tuples ...[]byte) (uint64, error) {
m.mu.Lock()
defer m.mu.Unlock()
l := len(tuples)
for i := 0; i < l; i += 3 {
newMi := &MutationItem{table: string(tuples[i]), key: tuples[i+1], value: tuples[i+2]}
		old := m.puts.ReplaceOrInsert(newMi)
		m.size += int(unsafe.Sizeof(newMi)) + len(newMi.key) + len(newMi.value)
		if old != nil {
			oldMi := old.(*MutationItem)
m.size -= (int(unsafe.Sizeof(oldMi)) + len(oldMi.key) + len(oldMi.value))
}
}
return 0, nil
}
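// Illustrative usage sketch (table and key names are hypothetical): MultiPut
// takes a flat list of (table, key, value) triplets, so one call can stage
// writes for several tables at once.
//
//	_, err := m.MultiPut(
//		[]byte("BucketA"), keyA, valA,
//		[]byte("BucketB"), keyB, valB,
//	)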
func (m *mutation) BatchSize() int {
m.mu.RLock()
defer m.mu.RUnlock()
return m.size
}
// WARNING: Merged mem/DB walk is not implemented
func (m *mutation) Walk(table string, startkey []byte, fixedbits int, walker func([]byte, []byte) (bool, error)) error {
m.panicOnEmptyDB()
return m.db.Walk(table, startkey, fixedbits, walker)
}
func (m *mutation) Delete(table string, k, v []byte) error {
if v != nil {
return m.db.Delete(table, k, v) // TODO: mutation to support DupSort deletes
}
//m.puts.Delete(table, k)
return m.Put(table, k, nil)
}
func (m *mutation) CommitAndBegin(ctx context.Context) error {
	return m.Commit()
}
func (m *mutation) RollbackAndBegin(ctx context.Context) error {
m.Rollback()
return nil
}
func (m *mutation) doCommit(tx RwTx) error {
var prevTable string
var c RwCursor
var innerErr error
var isEndOfBucket bool
logEvery := time.NewTicker(30 * time.Second)
defer logEvery.Stop()
count := 0
total := float64(m.puts.Len())
m.puts.Ascend(func(i btree.Item) bool {
mi := i.(*MutationItem)
if mi.table != prevTable {
if c != nil {
c.Close()
}
var err error
c, err = tx.RwCursor(mi.table)
if err != nil {
innerErr = err
return false
}
prevTable = mi.table
firstKey, _, err := c.Seek(mi.key)
if err != nil {
innerErr = err
return false
}
isEndOfBucket = firstKey == nil
}
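		// If the Seek at the start of this table found nothing at or after
		// the table's first pending key, every later key (the tree ascends)
		// lands past the current end of the bucket, so Append is safe below.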
if isEndOfBucket {
if len(mi.value) > 0 {
if err := c.Append(mi.key, mi.value); err != nil {
innerErr = err
return false
}
}
} else if len(mi.value) == 0 {
if err := c.Delete(mi.key, nil); err != nil {
innerErr = err
return false
}
} else {
if err := c.Put(mi.key, mi.value); err != nil {
innerErr = err
return false
}
}
count++
select {
default:
case <-logEvery.C:
progress := fmt.Sprintf("%.1fM/%.1fM", float64(count)/1_000_000, total/1_000_000)
log.Info("Write to db", "progress", progress, "current table", mi.table)
}
return true
})
return innerErr
}
func (m *mutation) Commit() error {
if m.db == nil {
return nil
}
m.mu.Lock()
defer m.mu.Unlock()
if tx, ok := m.db.(HasTx); ok {
if err := m.doCommit(tx.Tx().(RwTx)); err != nil {
return err
}
} else {
if err := m.db.(HasRwKV).RwKV().Update(context.Background(), func(tx RwTx) error {
return m.doCommit(tx)
}); err != nil {
return err
}
}
m.puts.Clear(false /* addNodesToFreelist */)
m.size = 0
return nil
}
func (m *mutation) Rollback() {
m.mu.Lock()
defer m.mu.Unlock()
m.puts.Clear(false /* addNodesToFreelist */)
m.size = 0
}
func (m *mutation) Keys() ([][]byte, error) {
m.mu.RLock()
defer m.mu.RUnlock()
tuples := common.NewTuples(m.puts.Len(), 2, 1)
var innerErr error
m.puts.Ascend(func(i btree.Item) bool {
mi := i.(*MutationItem)
if err := tuples.Append([]byte(mi.table), mi.key); err != nil {
innerErr = err
return false
}
return true
})
return tuples.Values, innerErr
}
func (m *mutation) Close() {
m.Rollback()
}
func (m *mutation) NewBatch() DbWithPendingMutations {
mm := &mutation{
db: m,
puts: btree.New(32),
}
return mm
}
func (m *mutation) Begin(ctx context.Context, flags TxFlags) (DbWithPendingMutations, error) {
return m.db.Begin(ctx, flags)
}
func (m *mutation) BeginGetter(ctx context.Context) (GetterTx, error) {
return m.db.BeginGetter(ctx)
}
func (m *mutation) panicOnEmptyDB() {
if m.db == nil {
panic("Not implemented")
}
}
func (m *mutation) MemCopy() Database {
m.panicOnEmptyDB()
return m.db
}
func (m *mutation) SetRwKV(kv RwKV) {
m.db.(HasRwKV).SetRwKV(kv)
}
func NewRWDecorator(db Database) *RWCounterDecorator {
return &RWCounterDecorator{
db,
DBCounterStats{},
}
}
type RWCounterDecorator struct {
Database
DBCounterStats
}
type DBCounterStats struct {
Put uint64
Get uint64
GetS uint64
GetAsOf uint64
Has uint64
Walk uint64
WalkAsOf uint64
MultiWalkAsOf uint64
Delete uint64
MultiPut uint64
}
func (d *RWCounterDecorator) Put(bucket string, key, value []byte) error {
atomic.AddUint64(&d.DBCounterStats.Put, 1)
return d.Database.Put(bucket, key, value)
}
func (d *RWCounterDecorator) Get(bucket string, key []byte) ([]byte, error) {
atomic.AddUint64(&d.DBCounterStats.Get, 1)
return d.Database.Get(bucket, key)
}
func (d *RWCounterDecorator) Has(bucket string, key []byte) (bool, error) {
atomic.AddUint64(&d.DBCounterStats.Has, 1)
return d.Database.Has(bucket, key)
}
func (d *RWCounterDecorator) Walk(bucket string, startkey []byte, fixedbits int, walker func([]byte, []byte) (bool, error)) error {
atomic.AddUint64(&d.DBCounterStats.Walk, 1)
return d.Database.Walk(bucket, startkey, fixedbits, walker)
}
func (d *RWCounterDecorator) Delete(bucket string, k, v []byte) error {
atomic.AddUint64(&d.DBCounterStats.Delete, 1)
return d.Database.Delete(bucket, k, v)
}
func (d *RWCounterDecorator) MultiPut(tuples ...[]byte) (uint64, error) {
atomic.AddUint64(&d.DBCounterStats.MultiPut, 1)
return d.Database.MultiPut(tuples...)
}
func (d *RWCounterDecorator) NewBatch() DbWithPendingMutations {
mm := &mutation{
db: d,
puts: btree.New(32),
}
return mm
}
| 1 | 22,041 | Let's change to tx | ledgerwatch-erigon | go |
@@ -55,6 +55,12 @@ const (
// By default, this will be the block type given to all blocks
// that aren't explicitly some other type.
defaultBlockTypeDefault = keybase1.BlockType_DATA
+
+ // By default, allow 10% of the free bytes on disk to be used in the disk block cache.
+ defaultDiskBlockCacheFraction = 0.10
+
+	// By default, allow 10% of the free bytes on disk to be used in the sync block cache.
+ defaultSyncBlockCacheFraction = 0.10
)
// ConfigLocal implements the Config interface using purely local | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"flag"
"os"
"path/filepath"
"strings"
"sync"
"time"
kbname "github.com/keybase/client/go/kbun"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/cache"
"github.com/keybase/kbfs/ioutil"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfsedits"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
metrics "github.com/rcrowley/go-metrics"
"github.com/shirou/gopsutil/mem"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/storage"
"golang.org/x/net/context"
"golang.org/x/net/trace"
)
const (
// Max supported size of a directory entry name.
maxNameBytesDefault = 255
// Default time after setting the rekey bit before prompting for a
// paper key.
rekeyWithPromptWaitTimeDefault = 10 * time.Minute
// see Config doc for the purpose of DelayedCancellationGracePeriod
delayedCancellationGracePeriodDefault = 2 * time.Second
// tlfValidDurationDefault is the default for tlf validity before redoing identify.
tlfValidDurationDefault = 6 * time.Hour
// bgFlushDirOpThresholdDefault is the default for how many
// directory operations should be batched together in a single
// background flush.
bgFlushDirOpBatchSizeDefault = 100
// bgFlushPeriodDefault is the default for how long to wait for a
// batch to fill up before syncing a set of changes to the servers.
bgFlushPeriodDefault = 1 * time.Second
keyBundlesCacheCapacityBytes = 10 * cache.MB
// folder name for persisted config parameters.
syncedTlfConfigFolderName = "synced_tlf_config"
// By default, this will be the block type given to all blocks
// that aren't explicitly some other type.
defaultBlockTypeDefault = keybase1.BlockType_DATA
)
// ConfigLocal implements the Config interface using purely local
// server objects (no KBFS operations use RPCs).
type ConfigLocal struct {
lock sync.RWMutex
kbfs KBFSOps
keyman KeyManager
rep Reporter
kcache KeyCache
kbcache kbfsmd.KeyBundleCache
bcache BlockCache
dirtyBcache DirtyBlockCache
diskBlockCache DiskBlockCache
codec kbfscodec.Codec
mdops MDOps
kops KeyOps
crypto Crypto
chat Chat
mdcache MDCache
bops BlockOps
mdserv MDServer
bserv BlockServer
keyserv KeyServer
service KeybaseService
bsplit BlockSplitter
notifier Notifier
clock Clock
kbpki KBPKI
renamer ConflictRenamer
userHistory *kbfsedits.UserHistory
registry metrics.Registry
loggerFn func(prefix string) logger.Logger
noBGFlush bool // logic opposite so the default value is the common setting
rwpWaitTime time.Duration
diskLimiter DiskLimiter
syncedTlfs map[tlf.ID]bool
defaultBlockType keybase1.BlockType
kbfsService *KBFSService
kbCtx Context
rootNodeWrappers []func(Node) Node
maxNameBytes uint32
rekeyQueue RekeyQueue
storageRoot string
diskCacheMode DiskCacheMode
traceLock sync.RWMutex
traceEnabled bool
delayedCancellationGracePeriod time.Duration
// allKnownConfigsForTesting is used for testing, and contains all created
// Config objects in this test.
allKnownConfigsForTesting *[]Config
// tlfValidDuration is the time TLFs are valid before redoing identification.
tlfValidDuration time.Duration
// bgFlushDirOpBatchSize indicates how many directory operations
// should be batched together in a single background flush.
bgFlushDirOpBatchSize int
// bgFlushPeriod indicates how long to wait for a batch to fill up
// before syncing a set of changes to the servers.
bgFlushPeriod time.Duration
// metadataVersion is the version to use when creating new metadata.
metadataVersion kbfsmd.MetadataVer
mode InitMode
quotaUsage map[keybase1.UserOrTeamID]*EventuallyConsistentQuotaUsage
rekeyFSMLimiter *OngoingWorkLimiter
}
// DiskCacheMode represents the mode of initialization for the disk cache.
type DiskCacheMode int
var _ flag.Value = (*DiskCacheMode)(nil)
const (
// DiskCacheModeOff indicates to leave off the disk cache.
DiskCacheModeOff DiskCacheMode = iota
// DiskCacheModeLocal indicates to use a local disk cache.
DiskCacheModeLocal
// DiskCacheModeRemote indicates to use a remote disk cache.
DiskCacheModeRemote
)
// String outputs a human-readable description of this DiskCacheMode.
func (m DiskCacheMode) String() string {
switch m {
case DiskCacheModeOff:
return "off"
case DiskCacheModeLocal:
return "local"
case DiskCacheModeRemote:
return "remote"
}
return "unknown"
}
// Set parses a string representing a disk block cache initialization mode,
// and outputs the mode value corresponding to that string. Defaults to
// DiskCacheModeOff.
func (m *DiskCacheMode) Set(s string) error {
*m = DiskCacheModeOff
switch strings.ToLower(strings.TrimSpace(s)) {
case "local":
*m = DiskCacheModeLocal
case "remote":
*m = DiskCacheModeRemote
}
return nil
}
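// Illustrative only: DiskCacheMode satisfies flag.Value, so it can be wired
// directly into a flag set. The flag name and default used here are
// hypothetical, not an existing part of this package.
func registerDiskCacheModeFlag(fs *flag.FlagSet) *DiskCacheMode {
	mode := DiskCacheModeLocal
	fs.Var(&mode, "disk-cache-mode", "disk block cache mode: off, local, or remote")
	return &mode
}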
var _ Config = (*ConfigLocal)(nil)
// LocalUser represents a fake KBFS user, useful for testing.
type LocalUser struct {
UserInfo
Asserts []string
// Index into UserInfo.CryptPublicKeys.
CurrentCryptPublicKeyIndex int
// Index into UserInfo.VerifyingKeys.
CurrentVerifyingKeyIndex int
// Unverified keys.
UnverifiedKeys []keybase1.PublicKey
}
// GetCurrentCryptPublicKey returns this LocalUser's public encryption key.
func (lu *LocalUser) GetCurrentCryptPublicKey() kbfscrypto.CryptPublicKey {
return lu.CryptPublicKeys[lu.CurrentCryptPublicKeyIndex]
}
// GetCurrentVerifyingKey returns this LocalUser's public signing key.
func (lu *LocalUser) GetCurrentVerifyingKey() kbfscrypto.VerifyingKey {
return lu.VerifyingKeys[lu.CurrentVerifyingKeyIndex]
}
func verifyingKeysToPublicKeys(
keys []kbfscrypto.VerifyingKey) []keybase1.PublicKey {
publicKeys := make([]keybase1.PublicKey, len(keys))
for i, key := range keys {
publicKeys[i] = keybase1.PublicKey{
KID: key.KID(),
IsSibkey: true,
}
}
return publicKeys
}
func cryptPublicKeysToPublicKeys(
keys []kbfscrypto.CryptPublicKey) []keybase1.PublicKey {
publicKeys := make([]keybase1.PublicKey, len(keys))
for i, key := range keys {
publicKeys[i] = keybase1.PublicKey{
KID: key.KID(),
IsSibkey: false,
}
}
return publicKeys
}
// GetPublicKeys returns all of this LocalUser's public encryption keys.
func (lu *LocalUser) GetPublicKeys() []keybase1.PublicKey {
sibkeys := verifyingKeysToPublicKeys(lu.VerifyingKeys)
subkeys := cryptPublicKeysToPublicKeys(lu.CryptPublicKeys)
return append(sibkeys, subkeys...)
}
func (lu LocalUser) deepCopy() LocalUser {
luCopy := lu
luCopy.VerifyingKeys = make(
[]kbfscrypto.VerifyingKey, len(lu.VerifyingKeys))
copy(luCopy.VerifyingKeys, lu.VerifyingKeys)
luCopy.CryptPublicKeys = make(
[]kbfscrypto.CryptPublicKey, len(lu.CryptPublicKeys))
copy(luCopy.CryptPublicKeys, lu.CryptPublicKeys)
luCopy.KIDNames = make(map[keybase1.KID]string, len(lu.KIDNames))
for k, v := range lu.KIDNames {
luCopy.KIDNames[k] = v
}
luCopy.RevokedVerifyingKeys = make(
map[kbfscrypto.VerifyingKey]revokedKeyInfo,
len(lu.RevokedVerifyingKeys))
for k, v := range lu.RevokedVerifyingKeys {
luCopy.RevokedVerifyingKeys[k] = v
}
luCopy.RevokedCryptPublicKeys = make(
map[kbfscrypto.CryptPublicKey]revokedKeyInfo,
len(lu.RevokedCryptPublicKeys))
for k, v := range lu.RevokedCryptPublicKeys {
luCopy.RevokedCryptPublicKeys[k] = v
}
luCopy.Asserts = make([]string, len(lu.Asserts))
copy(luCopy.Asserts, lu.Asserts)
luCopy.UnverifiedKeys = make([]keybase1.PublicKey, len(lu.UnverifiedKeys))
copy(luCopy.UnverifiedKeys, lu.UnverifiedKeys)
return luCopy
}
// Helper functions to get various keys for a local user suitable
// for use with CryptoLocal. Each function will always return the
// same key for a given user.
// MakeLocalUserSigningKeyOrBust returns a unique signing key for this user.
func MakeLocalUserSigningKeyOrBust(
name kbname.NormalizedUsername) kbfscrypto.SigningKey {
return kbfscrypto.MakeFakeSigningKeyOrBust(
string(name) + " signing key")
}
// MakeLocalUserVerifyingKeyOrBust makes a new verifying key
// corresponding to the signing key for this user.
func MakeLocalUserVerifyingKeyOrBust(
name kbname.NormalizedUsername) kbfscrypto.VerifyingKey {
return MakeLocalUserSigningKeyOrBust(name).GetVerifyingKey()
}
// MakeLocalUserCryptPrivateKeyOrBust returns a unique private
// encryption key for this user.
func MakeLocalUserCryptPrivateKeyOrBust(
name kbname.NormalizedUsername) kbfscrypto.CryptPrivateKey {
return kbfscrypto.MakeFakeCryptPrivateKeyOrBust(
string(name) + " crypt key")
}
// MakeLocalUserCryptPublicKeyOrBust returns the public key
// corresponding to the crypt private key for this user.
func MakeLocalUserCryptPublicKeyOrBust(
name kbname.NormalizedUsername) kbfscrypto.CryptPublicKey {
return MakeLocalUserCryptPrivateKeyOrBust(name).GetPublicKey()
}
// MakeLocalTLFCryptKeyOrBust returns a unique private symmetric key
// for a TLF.
func MakeLocalTLFCryptKeyOrBust(
name string, keyGen kbfsmd.KeyGen) kbfscrypto.TLFCryptKey {
// Put the key gen first to make it more likely to fit into the
// 32-character "random" seed.
return kbfscrypto.MakeFakeTLFCryptKeyOrBust(
string(name) + " " + string(keyGen) + " crypt key ")
}
// MakeLocalUsers is a helper function to generate a list of
// LocalUsers suitable to use with KeybaseDaemonLocal.
func MakeLocalUsers(users []kbname.NormalizedUsername) []LocalUser {
localUsers := make([]LocalUser, len(users))
for i := 0; i < len(users); i++ {
verifyingKey := MakeLocalUserVerifyingKeyOrBust(users[i])
cryptPublicKey := MakeLocalUserCryptPublicKeyOrBust(users[i])
localUsers[i] = LocalUser{
UserInfo: UserInfo{
Name: users[i],
UID: keybase1.MakeTestUID(uint32(i + 1)),
VerifyingKeys: []kbfscrypto.VerifyingKey{verifyingKey},
CryptPublicKeys: []kbfscrypto.CryptPublicKey{cryptPublicKey},
KIDNames: map[keybase1.KID]string{
verifyingKey.KID(): "dev1",
},
},
CurrentCryptPublicKeyIndex: 0,
CurrentVerifyingKeyIndex: 0,
}
}
return localUsers
}
func makeLocalTeams(
teams []kbname.NormalizedUsername, startingIndex int, ty tlf.Type) (
localTeams []TeamInfo) {
localTeams = make([]TeamInfo, len(teams))
for index := 0; index < len(teams); index++ {
i := index + startingIndex
cryptKey := MakeLocalTLFCryptKeyOrBust(
buildCanonicalPathForTlfType(
tlf.SingleTeam, string(teams[index])),
kbfsmd.FirstValidKeyGen)
localTeams[index] = TeamInfo{
Name: teams[index],
TID: keybase1.MakeTestTeamID(uint32(i+1), ty == tlf.Public),
CryptKeys: map[kbfsmd.KeyGen]kbfscrypto.TLFCryptKey{
kbfsmd.FirstValidKeyGen: cryptKey,
},
LatestKeyGen: kbfsmd.FirstValidKeyGen,
}
// If this is a subteam, set the root ID.
if strings.Contains(string(teams[index]), ".") {
parts := strings.SplitN(string(teams[index]), ".", 2)
for j := 0; j < index; j++ {
if parts[0] == string(localTeams[j].Name) {
localTeams[index].RootID = localTeams[j].TID
break
}
}
}
}
return localTeams
}
// MakeLocalTeams is a helper function to generate a list of local
// teams suitable to use with KeybaseDaemonLocal. Any subteams must come
// after their root team names in the `teams` slice.
func MakeLocalTeams(teams []kbname.NormalizedUsername) []TeamInfo {
return makeLocalTeams(teams, 0, tlf.Private)
}
// getDefaultCleanBlockCacheCapacity returns the default clean block
// cache capacity. If we can get the total RAM of the system, we cap
// at the smaller of <1/8 of total memory> and
// <MaxBlockSizeBytesDefault * DefaultBlocksInMemCache>; otherwise,
// we fall back to the latter.
func getDefaultCleanBlockCacheCapacity() uint64 {
capacity := uint64(MaxBlockSizeBytesDefault) * DefaultBlocksInMemCache
vmstat, err := mem.VirtualMemory()
if err == nil {
ramBased := vmstat.Total / 8
if ramBased < capacity {
capacity = ramBased
}
}
return capacity
}
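// Illustrative only: on a machine reporting 16 GiB of total RAM, the
// RAM-based cap above works out to 2 GiB, so the default capacity is
// min(2 GiB, MaxBlockSizeBytesDefault*DefaultBlocksInMemCache).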
// NewConfigLocal constructs a new ConfigLocal with some default
// components that don't depend on a logger. The caller will have to
// fill in the rest.
//
// TODO: Now that NewConfigLocal takes loggerFn, add more default
// components.
func NewConfigLocal(mode InitMode,
loggerFn func(module string) logger.Logger,
storageRoot string, diskCacheMode DiskCacheMode,
kbCtx Context) *ConfigLocal {
config := &ConfigLocal{
loggerFn: loggerFn,
storageRoot: storageRoot,
mode: mode,
diskCacheMode: diskCacheMode,
kbCtx: kbCtx,
}
if diskCacheMode == DiskCacheModeLocal {
config.loadSyncedTlfsLocked()
}
config.SetClock(wallClock{})
config.SetReporter(NewReporterSimple(config.Clock(), 10))
config.SetConflictRenamer(WriterDeviceDateConflictRenamer{config})
config.ResetCaches()
config.SetCodec(kbfscodec.NewMsgpack())
config.SetKeyOps(&KeyOpsStandard{config})
config.SetRekeyQueue(NewRekeyQueueStandard(config))
config.SetUserHistory(kbfsedits.NewUserHistory())
config.maxNameBytes = maxNameBytesDefault
config.rwpWaitTime = rekeyWithPromptWaitTimeDefault
config.delayedCancellationGracePeriod = delayedCancellationGracePeriodDefault
// Don't bother creating the registry if UseNilMetrics is set, or
// if we're in minimal mode.
if !metrics.UseNilMetrics && config.Mode().MetricsEnabled() {
registry := metrics.NewRegistry()
config.SetMetricsRegistry(registry)
}
config.tlfValidDuration = tlfValidDurationDefault
config.bgFlushDirOpBatchSize = bgFlushDirOpBatchSizeDefault
config.bgFlushPeriod = bgFlushPeriodDefault
config.metadataVersion = defaultClientMetadataVer
config.defaultBlockType = defaultBlockTypeDefault
config.quotaUsage =
make(map[keybase1.UserOrTeamID]*EventuallyConsistentQuotaUsage)
config.rekeyFSMLimiter = NewOngoingWorkLimiter(config.Mode().RekeyWorkers())
return config
}
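// Illustrative usage sketch (every argument below is a placeholder): build a
// purely local config, then wire in the remaining components via the setters.
//
//	cfg := NewConfigLocal(someMode, loggerFn, "/tmp/kbfs-test", DiskCacheModeOff, kbCtx)
//	cfg.SetBlockServer(someBlockServer)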
// KBFSOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KBFSOps() KBFSOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbfs
}
// SetKBFSOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKBFSOps(k KBFSOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.kbfs = k
}
// KBPKI implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KBPKI() KBPKI {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbpki
}
// CurrentSessionGetter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) CurrentSessionGetter() CurrentSessionGetter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbpki
}
// SetKBPKI implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKBPKI(k KBPKI) {
c.lock.Lock()
defer c.lock.Unlock()
c.kbpki = k
}
// KeyManager implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyManager() KeyManager {
c.lock.RLock()
defer c.lock.RUnlock()
return c.keyman
}
// SetKeyManager implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyManager(k KeyManager) {
c.lock.Lock()
defer c.lock.Unlock()
c.keyman = k
}
// KeyGetter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) keyGetter() blockKeyGetter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.keyman
}
// Reporter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Reporter() Reporter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.rep
}
// SetReporter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetReporter(r Reporter) {
c.lock.Lock()
defer c.lock.Unlock()
c.rep = r
}
// KeyCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyCache() KeyCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kcache
}
// SetKeyCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyCache(k KeyCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.kcache = k
}
// KeyBundleCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyBundleCache() kbfsmd.KeyBundleCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbcache
}
// SetKeyBundleCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyBundleCache(k kbfsmd.KeyBundleCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.kbcache = k
}
// BlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockCache() BlockCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bcache
}
// SetBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockCache(b BlockCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.bcache = b
}
// DirtyBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DirtyBlockCache() DirtyBlockCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.dirtyBcache
}
// SetDirtyBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetDirtyBlockCache(d DirtyBlockCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.dirtyBcache = d
}
// DiskBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DiskBlockCache() DiskBlockCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.diskBlockCache
}
// DiskLimiter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DiskLimiter() DiskLimiter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.diskLimiter
}
// Crypto implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Crypto() Crypto {
c.lock.RLock()
defer c.lock.RUnlock()
return c.crypto
}
// Chat implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Chat() Chat {
c.lock.RLock()
defer c.lock.RUnlock()
return c.chat
}
// Signer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Signer() kbfscrypto.Signer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.crypto
}
// SetCrypto implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetCrypto(cr Crypto) {
c.lock.Lock()
defer c.lock.Unlock()
c.crypto = cr
}
// SetChat implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetChat(ch Chat) {
c.lock.Lock()
defer c.lock.Unlock()
c.chat = ch
}
// CryptoPure implements the Config interface for ConfigLocal.
func (c *ConfigLocal) cryptoPure() cryptoPure {
c.lock.RLock()
defer c.lock.RUnlock()
return c.crypto
}
// Codec implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Codec() kbfscodec.Codec {
c.lock.RLock()
defer c.lock.RUnlock()
return c.codec
}
// SetCodec implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetCodec(co kbfscodec.Codec) {
c.lock.Lock()
defer c.lock.Unlock()
c.codec = co
RegisterOps(c.codec)
}
// MDOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MDOps() MDOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.mdops
}
// SetMDOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMDOps(m MDOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.mdops = m
}
// KeyOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyOps() KeyOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kops
}
// SetKeyOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyOps(k KeyOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.kops = k
}
// MDCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MDCache() MDCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.mdcache
}
// SetMDCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMDCache(m MDCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.mdcache = m
}
// BlockOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockOps() BlockOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bops
}
// SetBlockOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockOps(b BlockOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.bops = b
}
// MDServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MDServer() MDServer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.mdserv
}
// SetMDServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMDServer(m MDServer) {
c.lock.Lock()
defer c.lock.Unlock()
c.mdserv = m
}
// BlockServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockServer() BlockServer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bserv
}
// SetBlockServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockServer(b BlockServer) {
c.lock.Lock()
defer c.lock.Unlock()
c.bserv = b
}
// KeyServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyServer() KeyServer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.keyserv
}
// SetKeyServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyServer(k KeyServer) {
c.lock.Lock()
defer c.lock.Unlock()
c.keyserv = k
}
// KeybaseService implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeybaseService() KeybaseService {
c.lock.RLock()
defer c.lock.RUnlock()
return c.service
}
// SetKeybaseService implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeybaseService(k KeybaseService) {
c.lock.Lock()
defer c.lock.Unlock()
c.service = k
}
// BlockSplitter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockSplitter() BlockSplitter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bsplit
}
// SetBlockSplitter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockSplitter(b BlockSplitter) {
c.lock.Lock()
defer c.lock.Unlock()
c.bsplit = b
}
// Notifier implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Notifier() Notifier {
c.lock.RLock()
defer c.lock.RUnlock()
return c.notifier
}
// SetNotifier implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetNotifier(n Notifier) {
c.lock.Lock()
defer c.lock.Unlock()
c.notifier = n
}
// Clock implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Clock() Clock {
c.lock.RLock()
defer c.lock.RUnlock()
return c.clock
}
// SetClock implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetClock(cl Clock) {
c.lock.Lock()
defer c.lock.Unlock()
c.clock = cl
}
// ConflictRenamer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) ConflictRenamer() ConflictRenamer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.renamer
}
// SetConflictRenamer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetConflictRenamer(cr ConflictRenamer) {
c.lock.Lock()
defer c.lock.Unlock()
c.renamer = cr
}
// UserHistory implements the Config interface for ConfigLocal.
func (c *ConfigLocal) UserHistory() *kbfsedits.UserHistory {
c.lock.RLock()
defer c.lock.RUnlock()
return c.userHistory
}
// SetUserHistory implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetUserHistory(uh *kbfsedits.UserHistory) {
c.lock.Lock()
defer c.lock.Unlock()
c.userHistory = uh
}
// MetadataVersion implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MetadataVersion() kbfsmd.MetadataVer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.metadataVersion
}
// SetMetadataVersion implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMetadataVersion(mdVer kbfsmd.MetadataVer) {
c.lock.Lock()
defer c.lock.Unlock()
c.metadataVersion = mdVer
}
// DataVersion implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DataVersion() DataVer {
return IndirectDirsDataVer
}
// DefaultBlockType implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DefaultBlockType() keybase1.BlockType {
c.lock.RLock()
defer c.lock.RUnlock()
return c.defaultBlockType
}
// SetDefaultBlockType implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetDefaultBlockType(blockType keybase1.BlockType) {
c.lock.Lock()
defer c.lock.Unlock()
c.defaultBlockType = blockType
}
// DoBackgroundFlushes implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DoBackgroundFlushes() bool {
if !c.Mode().BackgroundFlushesEnabled() {
return false
}
c.lock.RLock()
defer c.lock.RUnlock()
return !c.noBGFlush
}
// SetDoBackgroundFlushes implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetDoBackgroundFlushes(doBGFlush bool) {
c.lock.Lock()
defer c.lock.Unlock()
c.noBGFlush = !doBGFlush
}
// RekeyWithPromptWaitTime implements the Config interface for
// ConfigLocal.
func (c *ConfigLocal) RekeyWithPromptWaitTime() time.Duration {
	c.lock.RLock()
	defer c.lock.RUnlock()
return c.rwpWaitTime
}
// SetRekeyWithPromptWaitTime implements the Config interface for
// ConfigLocal.
func (c *ConfigLocal) SetRekeyWithPromptWaitTime(d time.Duration) {
	c.lock.Lock()
	defer c.lock.Unlock()
c.rwpWaitTime = d
}
// Mode implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Mode() InitMode {
return c.mode
}
// IsTestMode implements the Config interface for ConfigLocal.
func (c *ConfigLocal) IsTestMode() bool {
return c.mode.IsTestMode()
}
// DelayedCancellationGracePeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DelayedCancellationGracePeriod() time.Duration {
return c.delayedCancellationGracePeriod
}
// SetDelayedCancellationGracePeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetDelayedCancellationGracePeriod(d time.Duration) {
c.delayedCancellationGracePeriod = d
}
// ReqsBufSize implements the Config interface for ConfigLocal.
func (c *ConfigLocal) ReqsBufSize() int {
return 20
}
// MaxNameBytes implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MaxNameBytes() uint32 {
return c.maxNameBytes
}
// StorageRoot implements the Config interface for ConfigLocal.
func (c *ConfigLocal) StorageRoot() string {
return c.storageRoot
}
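// resetCachesWithoutShutdown replaces the MD, key, key-bundle, block, and
// (when enabled) dirty block caches with fresh instances, returning the old
// dirty block cache so the caller can shut it down outside of c.lock.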
func (c *ConfigLocal) resetCachesWithoutShutdown() DirtyBlockCache {
c.lock.Lock()
defer c.lock.Unlock()
c.mdcache = NewMDCacheStandard(defaultMDCacheCapacity)
c.kcache = NewKeyCacheStandard(defaultMDCacheCapacity)
c.kbcache = kbfsmd.NewKeyBundleCacheLRU(keyBundlesCacheCapacityBytes)
log := c.MakeLogger("")
var capacity uint64
if c.bcache == nil {
capacity = getDefaultCleanBlockCacheCapacity()
log.Debug("setting default clean block cache capacity to %d",
capacity)
} else {
capacity = c.bcache.GetCleanBytesCapacity()
log.Debug("setting clean block cache capacity based on existing value %d",
capacity)
}
c.bcache = NewBlockCacheStandard(10000, capacity)
if !c.Mode().DirtyBlockCacheEnabled() {
return nil
}
oldDirtyBcache := c.dirtyBcache
// TODO: we should probably fail or re-schedule this reset if
// there is anything dirty in the dirty block cache.
// The minimum number of bytes we'll try to sync in parallel.
// This should be roughly the minimum amount of bytes we expect
// our worst supported connection to send within the timeout
// forced on us by the upper layer (19 seconds on OS X). With the
// current default of a single block, this minimum works out to
// ~1MB, so we can support a connection speed as low as ~54 KB/s.
minSyncBufferSize := int64(MaxBlockSizeBytesDefault)
// The maximum number of bytes we can try to sync at once (also limits the
// amount of memory used by dirty blocks). We use the same value from clean
// block cache capacity here.
maxSyncBufferSize := int64(capacity)
// Start off conservatively to avoid getting immediate timeouts on
// slow connections.
startSyncBufferSize := minSyncBufferSize
dbcLog := c.MakeLogger("DBC")
c.dirtyBcache = NewDirtyBlockCacheStandard(c.clock, dbcLog,
minSyncBufferSize, maxSyncBufferSize, startSyncBufferSize)
return oldDirtyBcache
}
// ResetCaches implements the Config interface for ConfigLocal.
func (c *ConfigLocal) ResetCaches() {
oldDirtyBcache := c.resetCachesWithoutShutdown()
jServer, err := GetJournalServer(c)
if err == nil {
if err := c.journalizeBcaches(jServer); err != nil {
if log := c.MakeLogger(""); log != nil {
log.CWarningf(nil, "Error journalizing dirty block cache: %+v", err)
}
}
}
if oldDirtyBcache != nil {
// Shutdown outside of the lock so it doesn't block other
// access to this config.
if err := oldDirtyBcache.Shutdown(); err != nil {
if log := c.MakeLogger(""); log != nil {
log.CWarningf(nil,
"Error shutting down old dirty block cache: %+v", err)
}
}
}
}
// MakeLogger implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MakeLogger(module string) logger.Logger {
// No need to lock since c.loggerFn is initialized once at
	// construction. Also, taking the lock here would deadlock with
	// resetCachesWithoutShutdown, which calls MakeLogger while holding c.lock.
return c.loggerFn(module)
}
// MetricsRegistry implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MetricsRegistry() metrics.Registry {
return c.registry
}
// SetRekeyQueue implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetRekeyQueue(r RekeyQueue) {
c.rekeyQueue = r
}
// RekeyQueue implements the Config interface for ConfigLocal.
func (c *ConfigLocal) RekeyQueue() RekeyQueue {
return c.rekeyQueue
}
// SetMetricsRegistry implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMetricsRegistry(r metrics.Registry) {
c.registry = r
}
// SetTraceOptions implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetTraceOptions(enabled bool) {
c.traceLock.Lock()
defer c.traceLock.Unlock()
c.traceEnabled = enabled
}
// MaybeStartTrace implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MaybeStartTrace(
ctx context.Context, family, title string) context.Context {
traceEnabled := func() bool {
c.traceLock.RLock()
defer c.traceLock.RUnlock()
return c.traceEnabled
}()
if !traceEnabled {
return ctx
}
tr := trace.New(family, title)
tr.SetMaxEvents(25)
ctx = trace.NewContext(ctx, tr)
return ctx
}
// MaybeFinishTrace implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MaybeFinishTrace(ctx context.Context, err error) {
if tr, ok := trace.FromContext(ctx); ok {
if err != nil {
tr.LazyPrintf("err=%+v", err)
tr.SetError()
}
tr.Finish()
}
}
// SetTLFValidDuration implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetTLFValidDuration(r time.Duration) {
c.tlfValidDuration = r
}
// TLFValidDuration implements the Config interface for ConfigLocal.
func (c *ConfigLocal) TLFValidDuration() time.Duration {
return c.tlfValidDuration
}
// SetBGFlushDirOpBatchSize implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBGFlushDirOpBatchSize(s int) {
c.lock.Lock()
defer c.lock.Unlock()
c.bgFlushDirOpBatchSize = s
}
// BGFlushDirOpBatchSize implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BGFlushDirOpBatchSize() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bgFlushDirOpBatchSize
}
// SetBGFlushPeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBGFlushPeriod(p time.Duration) {
c.lock.Lock()
defer c.lock.Unlock()
c.bgFlushPeriod = p
}
// BGFlushPeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BGFlushPeriod() time.Duration {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bgFlushPeriod
}
// Shutdown implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Shutdown(ctx context.Context) error {
c.RekeyQueue().Shutdown()
if c.CheckStateOnShutdown() && c.allKnownConfigsForTesting != nil {
// Before we do anything, wait for all archiving and
// journaling to finish.
for _, config := range *c.allKnownConfigsForTesting {
kbfsOps, ok := config.KBFSOps().(*KBFSOpsStandard)
if !ok {
continue
}
if err := kbfsOps.shutdownEdits(ctx); err != nil {
return err
}
for _, fbo := range kbfsOps.ops {
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
return err
}
log := config.MakeLogger("")
if err := WaitForTLFJournal(ctx, config, fbo.id(),
log); err != nil {
return err
}
// The above wait could have resulted in some MD
// flushes, so now we have to wait on any archives as
// well. We only need one more check for this, since
// archives don't produce MDs.
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := WaitForTLFJournal(ctx, config, fbo.id(),
log); err != nil {
return err
}
}
}
}
var errorList []error
err := c.KBFSOps().Shutdown(ctx)
if err != nil {
errorList = append(errorList, err)
// Continue with shutdown regardless of err.
err = nil
}
c.BlockOps().Shutdown()
c.MDServer().Shutdown()
c.KeyServer().Shutdown()
c.KeybaseService().Shutdown()
c.BlockServer().Shutdown(ctx)
c.Crypto().Shutdown()
c.Reporter().Shutdown()
dirtyBcache := c.DirtyBlockCache()
if dirtyBcache != nil {
err = dirtyBcache.Shutdown()
}
if err != nil {
errorList = append(errorList, err)
}
dbc := c.DiskBlockCache()
if dbc != nil {
dbc.Shutdown(ctx)
}
kbfsServ := c.kbfsService
if kbfsServ != nil {
kbfsServ.Shutdown()
}
if len(errorList) == 1 {
return errorList[0]
} else if len(errorList) > 1 {
// Aggregate errors
return errors.Errorf("Multiple errors on shutdown: %+v", errorList)
}
return nil
}
// CheckStateOnShutdown implements the Config interface for ConfigLocal.
func (c *ConfigLocal) CheckStateOnShutdown() bool {
if md, ok := c.MDServer().(mdServerLocal); ok {
return !md.isShutdown()
}
return false
}
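// journalizeBcaches wraps the current dirty block cache and block cache with
// journal-aware versions, handing the originals to the journal server as
// delegates.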
func (c *ConfigLocal) journalizeBcaches(jServer *JournalServer) error {
syncCache, ok := c.DirtyBlockCache().(*DirtyBlockCacheStandard)
if !ok {
return errors.Errorf("Dirty bcache unexpectedly type %T", syncCache)
}
jServer.delegateDirtyBlockCache = syncCache
// Make a dirty block cache specifically for the journal
// server. Since this doesn't rely directly on the network,
// there's no need for an adaptive sync buffer size, so we
// always set the min and max to the same thing.
maxSyncBufferSize := int64(ForcedBranchSquashBytesThresholdDefault)
log := c.MakeLogger("DBCJ")
journalCache := NewDirtyBlockCacheStandard(c.clock, log,
maxSyncBufferSize, maxSyncBufferSize, maxSyncBufferSize)
c.SetDirtyBlockCache(jServer.dirtyBlockCache(journalCache))
jServer.delegateBlockCache = c.BlockCache()
c.SetBlockCache(jServer.blockCache())
return nil
}
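// getQuotaUsage returns the quota usage tracker for the given user or team,
// lazily creating and caching one if it doesn't exist yet.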
func (c *ConfigLocal) getQuotaUsage(
chargedTo keybase1.UserOrTeamID) *EventuallyConsistentQuotaUsage {
c.lock.RLock()
quota, ok := c.quotaUsage[chargedTo]
if ok {
c.lock.RUnlock()
return quota
}
c.lock.RUnlock()
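	// Not found under the read lock; take the write lock and re-check,
	// since another goroutine may have created the entry in the meantime.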
c.lock.Lock()
defer c.lock.Unlock()
quota, ok = c.quotaUsage[chargedTo]
if !ok {
if chargedTo.IsTeamOrSubteam() {
quota = NewEventuallyConsistentTeamQuotaUsage(
c, chargedTo.AsTeamOrBust(), "BDL")
} else {
quota = NewEventuallyConsistentQuotaUsage(c, "BDL")
}
c.quotaUsage[chargedTo] = quota
}
return quota
}
// EnableDiskLimiter fills in c.diskLimiter for use in journaling and
// disk caching. It returns the EventuallyConsistentQuotaUsage object
// used by the disk limiter.
func (c *ConfigLocal) EnableDiskLimiter(configRoot string) error {
if c.diskLimiter != nil {
return errors.New("c.diskLimiter is already non-nil")
}
params := makeDefaultBackpressureDiskLimiterParams(
configRoot, c.getQuotaUsage)
log := c.MakeLogger("")
log.Debug("Setting disk storage byte limit to %d and file limit to %d",
params.byteLimit, params.fileLimit)
os.MkdirAll(configRoot, 0700)
diskLimiter, err := newBackpressureDiskLimiter(log, params)
if err != nil {
return err
}
c.diskLimiter = diskLimiter
return nil
}
// EnableJournaling creates a JournalServer and attaches it to
// this config. journalRoot must be non-empty. Errors returned are
// non-fatal.
func (c *ConfigLocal) EnableJournaling(
ctx context.Context, journalRoot string,
bws TLFJournalBackgroundWorkStatus) error {
jServer, err := GetJournalServer(c)
if err == nil {
// Journaling shouldn't be enabled twice for the same
// config.
return errors.New("Trying to enable journaling twice")
}
// TODO: Sanity-check the root directory, e.g. create
// it if it doesn't exist, make sure that it doesn't
// point to /keybase itself, etc.
log := c.MakeLogger("")
branchListener := c.KBFSOps().(branchChangeListener)
flushListener := c.KBFSOps().(mdFlushListener)
// Make sure the journal root exists.
err = ioutil.MkdirAll(journalRoot, 0700)
if err != nil {
return err
}
jServer = makeJournalServer(c, log, journalRoot, c.BlockCache(),
c.DirtyBlockCache(), c.BlockServer(), c.MDOps(), branchListener,
flushListener)
c.SetBlockServer(jServer.blockServer())
c.SetMDOps(jServer.mdOps())
bcacheErr := c.journalizeBcaches(jServer)
enableErr := func() error {
// If this fails, then existing journals will be
// enabled when we receive the login notification.
session, err := c.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
err = jServer.EnableExistingJournals(
ctx, session.UID, session.VerifyingKey, bws)
if err != nil {
return err
}
wg := jServer.MakeFBOsForExistingJournals(ctx)
wg.Wait()
return nil
}()
switch {
case bcacheErr != nil && enableErr != nil:
return errors.Errorf(
"Got errors %+v and %+v", bcacheErr, enableErr)
case bcacheErr != nil:
return bcacheErr
case enableErr != nil:
return enableErr
}
return nil
}
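// resetDiskBlockCacheLocked replaces c.diskBlockCache with a freshly-opened
// disk block cache rooted at c.storageRoot. c.lock must be held.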
func (c *ConfigLocal) resetDiskBlockCacheLocked() error {
dbc, err := newDiskBlockCacheWrapped(c, c.storageRoot)
if err != nil {
return err
}
c.diskBlockCache = dbc
return nil
}
// MakeDiskBlockCacheIfNotExists implements the Config interface for
// ConfigLocal.
func (c *ConfigLocal) MakeDiskBlockCacheIfNotExists() error {
c.lock.Lock()
defer c.lock.Unlock()
if c.diskBlockCache != nil {
return nil
}
switch c.diskCacheMode {
case DiskCacheModeOff:
return nil
case DiskCacheModeLocal:
return c.resetDiskBlockCacheLocked()
case DiskCacheModeRemote:
dbc, err := NewDiskBlockCacheRemote(c.kbCtx, c)
if err != nil {
return err
}
c.diskBlockCache = dbc
return nil
}
return nil
}
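// openConfigLevelDB opens the LevelDB with the given name under the storage
// root, used for small per-config persistent state such as the synced-TLF
// list.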
func (c *ConfigLocal) openConfigLevelDB(configName string) (*levelDb, error) {
dbPath := filepath.Join(c.storageRoot, configName)
stor, err := storage.OpenFile(dbPath, false)
if err != nil {
return nil, err
}
return openLevelDB(stor)
}
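// loadSyncedTlfsLocked reads the set of synced TLF IDs from disk into
// c.syncedTlfs, deleting any entries that fail to parse. c.lock must be held.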
func (c *ConfigLocal) loadSyncedTlfsLocked() (err error) {
syncedTlfs := make(map[tlf.ID]bool)
if c.IsTestMode() {
c.syncedTlfs = syncedTlfs
return nil
}
if c.storageRoot == "" {
return errors.New("empty storageRoot specified for non-test run")
}
ldb, err := c.openConfigLevelDB(syncedTlfConfigFolderName)
if err != nil {
return err
}
defer ldb.Close()
iter := ldb.NewIterator(nil, nil)
defer iter.Release()
log := c.MakeLogger("")
// If there are any un-parseable IDs, delete them.
deleteBatch := new(leveldb.Batch)
for iter.Next() {
key := string(iter.Key())
tlfID, err := tlf.ParseID(key)
if err != nil {
log.Debug("deleting TLF %s from synced TLF list", key)
deleteBatch.Delete(iter.Key())
continue
}
syncedTlfs[tlfID] = true
}
c.syncedTlfs = syncedTlfs
return ldb.Write(deleteBatch, nil)
}
// IsSyncedTlf implements the isSyncedTlfGetter interface for ConfigLocal.
func (c *ConfigLocal) IsSyncedTlf(tlfID tlf.ID) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.syncedTlfs[tlfID]
}
// SetTlfSyncState implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetTlfSyncState(tlfID tlf.ID, isSynced bool) error {
c.lock.Lock()
defer c.lock.Unlock()
if isSynced {
diskCacheWrapped, ok := c.diskBlockCache.(*diskBlockCacheWrapped)
if !ok {
return errors.Errorf("invalid disk cache type to set TLF sync "+
"state: %T", c.diskBlockCache)
}
if !diskCacheWrapped.IsSyncCacheEnabled() {
return errors.New("sync block cache is not enabled")
}
}
if !c.IsTestMode() {
if c.storageRoot == "" {
return errors.New("empty storageRoot specified for non-test run")
}
ldb, err := c.openConfigLevelDB(syncedTlfConfigFolderName)
if err != nil {
return err
}
defer ldb.Close()
tlfBytes, err := tlfID.MarshalText()
if err != nil {
return err
}
if isSynced {
err = ldb.Put(tlfBytes, nil, nil)
} else {
err = ldb.Delete(tlfBytes, nil)
}
if err != nil {
return err
}
}
c.syncedTlfs[tlfID] = isSynced
<-c.bops.TogglePrefetcher(true)
return nil
}
// PrefetchStatus implements the Config interface for ConfigLocal.
func (c *ConfigLocal) PrefetchStatus(ctx context.Context, tlfID tlf.ID,
ptr BlockPointer) PrefetchStatus {
_, prefetchStatus, _, err := c.BlockCache().GetWithPrefetch(ptr)
if err != nil {
prefetchStatus = NoPrefetch
dbc := c.DiskBlockCache()
if dbc != nil {
_, _, prefetchStatus, err = dbc.Get(ctx, tlfID, ptr.ID)
if err != nil {
prefetchStatus = NoPrefetch
}
}
}
return prefetchStatus
}
// GetRekeyFSMLimiter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) GetRekeyFSMLimiter() *OngoingWorkLimiter {
return c.rekeyFSMLimiter
}
// SetKBFSService sets the KBFSService for this ConfigLocal.
func (c *ConfigLocal) SetKBFSService(k *KBFSService) {
c.lock.Lock()
defer c.lock.Unlock()
if c.kbfsService != nil {
c.kbfsService.Shutdown()
}
c.kbfsService = k
}
// RootNodeWrappers implements the Config interface for ConfigLocal.
func (c *ConfigLocal) RootNodeWrappers() []func(Node) Node {
c.lock.RLock()
defer c.lock.RUnlock()
return c.rootNodeWrappers[:]
}
// AddRootNodeWrapper implements the Config interface for ConfigLocal.
func (c *ConfigLocal) AddRootNodeWrapper(f func(Node) Node) {
c.lock.Lock()
defer c.lock.Unlock()
c.rootNodeWrappers = append(c.rootNodeWrappers, f)
}
| 1 | 20,325 | Looks like I was wrong about the sync cache size being 15% before. There seems to be some reporting bug related to my use of the sync cache, that makes the limit different from the working set cache limit. For now, I say we keep this at `0.10` (change the param default too), and we can change it later if needed. | keybase-kbfs | go |
@@ -151,7 +151,7 @@ func sentryNotSetupWarning() {
// from the last saved version. If it is, prompt to request anon ddev usage stats
// and update the info.
func checkDdevVersionAndOptInSentry() error {
- if !output.JSONOutput && version.COMMIT != globalconfig.DdevGlobalConfig.LastUsedVersion && globalconfig.DdevGlobalConfig.InstrumentationOptIn == false {
+ if !output.JSONOutput && version.COMMIT != globalconfig.DdevGlobalConfig.LastUsedVersion && globalconfig.DdevGlobalConfig.InstrumentationOptIn == false && os.Getenv("DDEV_NO_SENTRY") == "" {
allowStats := util.Confirm("It looks like you have a new ddev release.\nMay we send anonymous ddev usage statistics and errors?\nTo know what we will see please take a look at\nhttps://ddev.readthedocs.io/en/latest/users/cli-usage/#opt-in-usage-information\nPermission to beam up?")
if allowStats {
globalconfig.DdevGlobalConfig.InstrumentationOptIn = true | 1 | package cmd
import (
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/globalconfig"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/updatecheck"
"github.com/drud/ddev/pkg/util"
"github.com/drud/ddev/pkg/version"
"github.com/getsentry/raven-go"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"os"
"path/filepath"
"strings"
"time"
)
var (
updateInterval = time.Hour * 24 * 7 // One week interval between updates
serviceType string
updateDocURL = "https://ddev.readthedocs.io/en/stable/#installation"
)
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "ddev",
Short: "A CLI for interacting with ddev.",
	Long:  "This Command Line Interface (CLI) gives you the ability to interact with ddev to create a development environment.",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
ignores := []string{"version", "config", "hostname", "help", "auth-pantheon", "import-files"}
command := strings.Join(os.Args[1:], " ")
output.LogSetUp()
// Skip docker validation for any command listed in "ignores"
for _, k := range ignores {
if strings.Contains(command, k) {
return
}
}
err := dockerutil.CheckDockerVersion(version.DockerVersionConstraint)
if err != nil {
if err.Error() == "no docker" {
if os.Args[1] != "version" && os.Args[1] != "config" {
util.Failed("Could not connect to docker. Please ensure Docker is installed and running.")
}
} else {
util.Failed("The docker version currently installed does not meet ddev's requirements: %v", err)
}
}
err = dockerutil.CheckDockerCompose(version.DockerComposeVersionConstraint)
if err != nil {
if err.Error() == "no docker-compose" {
util.Failed("docker-compose does not appear to be installed.")
} else {
util.Failed("The docker-compose version currently installed does not meet ddev's requirements: %v", err)
}
}
updateFile := filepath.Join(globalconfig.GetGlobalDdevDir(), ".update")
// Do periodic detection of whether an update is available for ddev users.
timeToCheckForUpdates, err := updatecheck.IsUpdateNeeded(updateFile, updateInterval)
if err != nil {
util.Warning("Could not perform update check: %v", err)
}
if timeToCheckForUpdates {
// Recreate the updatefile with current time so we won't do this again soon.
err = updatecheck.ResetUpdateTime(updateFile)
if err != nil {
util.Warning("Failed to update updatecheck file %s", updateFile)
return // Do not continue as we'll end up with github api violations.
}
updateNeeded, updateURL, err := updatecheck.AvailableUpdates("drud", "ddev", version.DdevVersion)
if err != nil {
util.Warning("Could not check for updates. This is most often caused by a networking issue.")
log.Debug(err)
return
}
if updateNeeded {
util.Warning("\n\nA new update is available! please visit %s to download the update.\nFor upgrade help see %s", updateURL, updateDocURL)
}
}
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
		// Do not report these commands
ignores := map[string]bool{"list": true, "version": true, "help": true, "auth-pantheon": true, "hostname": true}
if _, ok := ignores[cmd.CalledAs()]; ok {
return
}
sentryNotSetupWarning()
// All this nonsense is to capture the official usage we used for this command.
// Unfortunately cobra doesn't seem to provide this easily.
// We use the first word of Use: to get it.
cmdCopy := cmd
var fullCommand = make([]string, 0)
fullCommand = append(fullCommand, util.GetFirstWord(cmdCopy.Use))
for cmdCopy.HasParent() {
fullCommand = append(fullCommand, util.GetFirstWord(cmdCopy.Parent().Use))
cmdCopy = cmdCopy.Parent()
}
uString := "Usage:"
for i := len(fullCommand) - 1; i >= 0; i = i - 1 {
uString = uString + " " + fullCommand[i]
}
if globalconfig.DdevGlobalConfig.InstrumentationOptIn && version.SentryDSN != "" {
_ = raven.CaptureMessageAndWait(uString, map[string]string{"severity-level": "info", "report-type": "usage"})
}
},
}
// Execute adds all child commands to the root command sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
// bind flags to viper config values...allows override by flag
viper.AutomaticEnv() // read in environment variables that match
if err := RootCmd.Execute(); err != nil {
os.Exit(-1)
}
}
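// init registers the persistent --json-output flag and reads the global
// ddev config, failing fast if the config can't be loaded.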
func init() {
RootCmd.PersistentFlags().BoolVarP(&output.JSONOutput, "json-output", "j", false, "If true, user-oriented output will be in JSON format.")
err := globalconfig.ReadGlobalConfig()
if err != nil {
util.Failed("Failed to read global config file %s: %v", globalconfig.GetGlobalConfigPath(), err)
}
}
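// sentryNotSetupWarning warns the user when instrumentation is opted in
// but no Sentry DSN was built into this binary.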
func sentryNotSetupWarning() {
if version.SentryDSN == "" && globalconfig.DdevGlobalConfig.InstrumentationOptIn {
output.UserOut.Warning("Instrumentation is opted in, but SentryDSN is not available.")
}
}
// checkDdevVersionAndOptInSentry() reads global config and checks to see if current version is different
// from the last saved version. If it is, prompt to request anon ddev usage stats
// and update the info.
func checkDdevVersionAndOptInSentry() error {
if !output.JSONOutput && version.COMMIT != globalconfig.DdevGlobalConfig.LastUsedVersion && globalconfig.DdevGlobalConfig.InstrumentationOptIn == false {
allowStats := util.Confirm("It looks like you have a new ddev release.\nMay we send anonymous ddev usage statistics and errors?\nTo know what we will see please take a look at\nhttps://ddev.readthedocs.io/en/latest/users/cli-usage/#opt-in-usage-information\nPermission to beam up?")
if allowStats {
globalconfig.DdevGlobalConfig.InstrumentationOptIn = true
}
globalconfig.DdevGlobalConfig.LastUsedVersion = version.COMMIT
err := globalconfig.WriteGlobalConfig(globalconfig.DdevGlobalConfig)
if err != nil {
return err
}
}
return nil
}
| 1 | 13,373 | I think it'd be a good idea to define the environment variable name as a constant in values.go because it's hard-coded in several places, but that's a small point. | drud-ddev | go |
@@ -0,0 +1,6 @@
+class AddIndexesToClassifications < ActiveRecord::Migration
+ def change
+ add_index :classifications, :topic_id
+ add_index :classifications, [:classifiable_id, :classifiable_type]
+ end
+end | 1 | 1 | 18,565 | Style/SymbolArray: Use %i or %I for an array of symbols. | thoughtbot-upcase | rb |
|
@@ -133,6 +133,13 @@ func TestComposeCmd(t *testing.T) {
assert.Error(err)
}
+func TestCheckCompose(t *testing.T) {
+ assert := asrt.New(t)
+
+ err := CheckDockerCompose()
+ assert.NoError(err)
+}
+
func TestGetAppContainers(t *testing.T) {
assert := asrt.New(t)
sites, err := GetAppContainers("dockertest") | 1 | package dockerutil_test
import (
"os"
"testing"
log "github.com/sirupsen/logrus"
"path/filepath"
. "github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/output"
docker "github.com/fsouza/go-dockerclient"
asrt "github.com/stretchr/testify/assert"
)
var (
// The image here can be any image, it just has to exist so it can be used for labels, etc.
TestRouterImage = "busybox"
TestRouterTag = "1"
)
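// TestMain pulls a test image and starts a labeled test container for this
// package's tests, then removes the container after the tests complete.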
func TestMain(m *testing.M) {
output.LogSetUp()
// prep docker container for docker util tests
client := GetDockerClient()
err := client.PullImage(docker.PullImageOptions{
Repository: TestRouterImage,
Tag: TestRouterTag,
}, docker.AuthConfiguration{})
if err != nil {
log.Fatal("failed to pull test image ", err)
}
container, err := client.CreateContainer(docker.CreateContainerOptions{
Name: "envtest",
Config: &docker.Config{
Image: TestRouterImage + ":" + TestRouterTag,
Labels: map[string]string{
"com.docker.compose.service": "ddevrouter",
"com.ddev.site-name": "dockertest",
},
Env: []string{"HOTDOG=superior-to-corndog", "POTATO=future-fry"},
},
})
if err != nil {
log.Fatal("failed to create/start docker container ", err)
}
exitStatus := m.Run()
// teardown docker container from docker util tests
err = client.RemoveContainer(docker.RemoveContainerOptions{
ID: container.ID,
Force: true,
})
if err != nil {
log.Fatal("failed to remove test container: ", err)
}
os.Exit(exitStatus)
}
// TestGetContainerHealth tests the function for processing container readiness.
func TestGetContainerHealth(t *testing.T) {
assert := asrt.New(t)
container := docker.APIContainers{
Status: "Up 24 seconds (health: starting)",
}
out := GetContainerHealth(container)
assert.Equal(out, "starting")
container = docker.APIContainers{
Status: "Up 14 minutes (healthy)",
}
out = GetContainerHealth(container)
assert.Equal(out, "healthy")
container = docker.APIContainers{
State: "exited",
}
out = GetContainerHealth(container)
assert.Equal(out, container.State)
container = docker.APIContainers{
State: "restarting",
}
out = GetContainerHealth(container)
assert.Equal(out, container.State)
}
// TestContainerWait tests the error cases for the container check wait loop.
func TestContainerWait(t *testing.T) {
assert := asrt.New(t)
labels := map[string]string{
"com.ddev.site-name": "foo",
"com.docker.compose.service": "web",
}
err := ContainerWait(0, labels)
assert.Error(err)
assert.Equal("health check timed out", err.Error())
err = ContainerWait(5, labels)
assert.Error(err)
assert.Equal("failed to query container", err.Error())
}
// TestComposeCmd tests execution of docker-compose commands.
func TestComposeCmd(t *testing.T) {
assert := asrt.New(t)
composeFiles := []string{filepath.Join("testdata", "docker-compose.yml")}
stdout, stderr, err := ComposeCmd(composeFiles, "config", "--services")
assert.NoError(err)
assert.Contains(stdout, "web")
assert.Contains(stdout, "db")
assert.Contains(stderr, "Defaulting to a blank string")
composeFiles = append(composeFiles, filepath.Join("testdata", "docker-compose.override.yml"))
stdout, stderr, err = ComposeCmd(composeFiles, "config", "--services")
assert.NoError(err)
assert.Contains(stdout, "web")
assert.Contains(stdout, "db")
assert.Contains(stdout, "foo")
assert.Contains(stderr, "Defaulting to a blank string")
composeFiles = []string{"invalid.yml"}
_, _, err = ComposeCmd(composeFiles, "config", "--services")
assert.Error(err)
}
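// TestGetAppContainers tests looking up app containers by site name.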
func TestGetAppContainers(t *testing.T) {
assert := asrt.New(t)
sites, err := GetAppContainers("dockertest")
assert.NoError(err)
assert.Equal(sites[0].Image, TestRouterImage+":"+TestRouterTag)
}
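// TestGetContainerEnv tests reading environment variables from a running
// container.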
func TestGetContainerEnv(t *testing.T) {
assert := asrt.New(t)
container, err := FindContainerByLabels(map[string]string{"com.docker.compose.service": "ddevrouter"})
assert.NoError(err)
env := GetContainerEnv("HOTDOG", container)
assert.Equal("superior-to-corndog", env)
env = GetContainerEnv("POTATO", container)
assert.Equal("future-fry", env)
env = GetContainerEnv("NONEXISTENT", container)
assert.Equal("", env)
}
| 1 | 11,989 | Our habit is to go ahead and put a description line (or more) in front of every function, not just non-test or exported functions. | drud-ddev | go |
@@ -116,6 +116,10 @@ public abstract class AbstractOAuth2AuthenticationProvider implements Authentica
final OAuthRequest request = new OAuthRequest(Verb.GET, userEndpoint, service);
request.addHeader("Authorization", "Bearer " + accessToken.getAccessToken());
request.setCharset("UTF-8");
+
+ // Microsoft
+ request.addHeader("Accept", "application/json");
+
final Response response = request.send();
int responseCode = response.getCode(); | 1 | package edu.harvard.iq.dataverse.authorization.providers.oauth2;
import com.github.scribejava.core.builder.ServiceBuilder;
import com.github.scribejava.core.builder.api.BaseApi;
import com.github.scribejava.core.model.OAuth2AccessToken;
import com.github.scribejava.core.model.OAuthRequest;
import com.github.scribejava.core.model.Response;
import com.github.scribejava.core.model.Verb;
import com.github.scribejava.core.oauth.OAuth20Service;
import edu.harvard.iq.dataverse.authorization.AuthenticatedUserDisplayInfo;
import edu.harvard.iq.dataverse.authorization.AuthenticationProvider;
import edu.harvard.iq.dataverse.authorization.AuthenticationProviderDisplayInfo;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Base class for OAuth2 identity providers, such as GitHub and ORCiD.
*
* @author michael
*/
public abstract class AbstractOAuth2AuthenticationProvider implements AuthenticationProvider {
final static Logger logger = Logger.getLogger(AbstractOAuth2AuthenticationProvider.class.getName());
protected static class ParsedUserResponse {
public final AuthenticatedUserDisplayInfo displayInfo;
public final String userIdInProvider;
public final String username;
public final List<String> emails = new ArrayList<>();
public ParsedUserResponse(AuthenticatedUserDisplayInfo aDisplayInfo, String aUserIdInProvider, String aUsername, List<String> someEmails) {
displayInfo = aDisplayInfo;
userIdInProvider = aUserIdInProvider;
username = aUsername;
            emails.addAll(someEmails);
}
public ParsedUserResponse(AuthenticatedUserDisplayInfo displayInfo, String userIdInProvider, String username) {
this(displayInfo, userIdInProvider, username, Collections.emptyList());
}
@Override
public int hashCode() {
int hash = 7;
hash = 47 * hash + Objects.hashCode(this.userIdInProvider);
hash = 47 * hash + Objects.hashCode(this.username);
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final ParsedUserResponse other = (ParsedUserResponse) obj;
if (!Objects.equals(this.userIdInProvider, other.userIdInProvider)) {
return false;
}
if (!Objects.equals(this.username, other.username)) {
return false;
}
if (!Objects.equals(this.displayInfo, other.displayInfo)) {
return false;
}
return Objects.equals(this.emails, other.emails);
}
@Override
public String toString() {
return "ParsedUserResponse{" + "displayInfo=" + displayInfo + ", userIdInProvider=" + userIdInProvider + ", username=" + username + ", emails=" + emails + '}';
}
}
protected String id;
protected String title;
protected String subTitle;
protected String clientId;
protected String clientSecret;
protected String baseUserEndpoint;
protected String redirectUrl;
protected String scope;
public abstract BaseApi<OAuth20Service> getApiInstance();
protected abstract ParsedUserResponse parseUserResponse( String responseBody );
public OAuth20Service getService(String state, String redirectUrl) {
ServiceBuilder svcBuilder = new ServiceBuilder()
.apiKey(getClientId())
.apiSecret(getClientSecret())
.state(state)
.callback(redirectUrl);
if ( scope != null ) {
svcBuilder.scope(scope);
}
return svcBuilder.build( getApiInstance() );
}
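    /**
     * Exchanges the authorization code for an access token, queries the
     * provider's user info endpoint, and parses the response into an
     * OAuth2UserRecord.
     */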
public OAuth2UserRecord getUserRecord(String code, String state, String redirectUrl) throws IOException, OAuth2Exception {
OAuth20Service service = getService(state, redirectUrl);
OAuth2AccessToken accessToken = service.getAccessToken(code);
final String userEndpoint = getUserEndpoint(accessToken);
final OAuthRequest request = new OAuthRequest(Verb.GET, userEndpoint, service);
request.addHeader("Authorization", "Bearer " + accessToken.getAccessToken());
request.setCharset("UTF-8");
final Response response = request.send();
int responseCode = response.getCode();
final String body = response.getBody();
logger.log(Level.FINE, "In getUserRecord. Body: {0}", body);
if ( responseCode == 200 ) {
final ParsedUserResponse parsed = parseUserResponse(body);
return new OAuth2UserRecord(getId(), parsed.userIdInProvider,
parsed.username,
OAuth2TokenData.from(accessToken),
parsed.displayInfo,
parsed.emails);
} else {
throw new OAuth2Exception(responseCode, body, "Error getting the user info record.");
}
}
@Override
public boolean isUserInfoUpdateAllowed() {
return true;
}
@Override
public void updateUserInfo(String userIdInProvider, AuthenticatedUserDisplayInfo updatedUserData) {
// ignore - no account info is stored locally.
// We override this to prevent the UnsupportedOperationException thrown by
// the default implementation.
}
@Override
public AuthenticationProviderDisplayInfo getInfo() {
return new AuthenticationProviderDisplayInfo(getId(), getTitle(), getSubTitle());
}
@Override
public String getId() {
return id;
}
public String getTitle() {
return title;
}
public String getClientId() {
return clientId;
}
public String getClientSecret() {
return clientSecret;
}
public String getUserEndpoint( OAuth2AccessToken token ) {
return baseUserEndpoint;
}
public String getRedirectUrl() {
return redirectUrl;
}
public Optional<String> getIconHtml() {
return Optional.empty();
}
public void setId(String id) {
this.id = id;
}
public void setTitle(String title) {
this.title = title;
}
public void setSubTitle(String subtitle) {
this.subTitle = subtitle;
}
public String getSubTitle() {
return subTitle;
}
@Override
public int hashCode() {
int hash = 7;
hash = 97 * hash + Objects.hashCode(this.id);
hash = 97 * hash + Objects.hashCode(this.clientId);
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if ( ! (obj instanceof AbstractOAuth2AuthenticationProvider)) {
return false;
}
final AbstractOAuth2AuthenticationProvider other = (AbstractOAuth2AuthenticationProvider) obj;
if (!Objects.equals(this.id, other.id)) {
return false;
}
if (!Objects.equals(this.clientId, other.clientId)) {
return false;
}
return Objects.equals(this.clientSecret, other.clientSecret);
}
@Override
public boolean isOAuthProvider() {
return true;
}
public enum DevOAuthAccountType {
PRODUCTION,
RANDOM_EMAIL0,
RANDOM_EMAIL1,
RANDOM_EMAIL2,
RANDOM_EMAIL3,
};
}
| 1 | 40,432 | I'm slightly concerned about this because doesn't ORCID use XML instead of JSON? | IQSS-dataverse | java |
@@ -17,7 +17,11 @@ def is_new_user_stats_batch():
So, we check the database and see if the difference between the last time stats were updated
and right now is greater than 12 hours.
"""
- return datetime.now(timezone.utc) - db_stats.get_timestamp_for_last_user_stats_update() > timedelta(hours=TIME_TO_CONSIDER_STATS_AS_OLD)
+ last_update_ts = db_stats.get_timestamp_for_last_user_stats_update()
+ if(last_update_ts is None):
+ last_update_ts = datetime.min.replace(tzinfo=timezone.utc) # use min datetime value if last_update_ts is None
+
+ return datetime.now(timezone.utc) - last_update_ts > timedelta(hours=TIME_TO_CONSIDER_STATS_AS_OLD)
def notify_user_stats_update(): | 1 | """ This file contains handler functions for rabbitmq messages we
receive from the Spark cluster.
"""
import listenbrainz.db.user as db_user
import listenbrainz.db.stats as db_stats
from flask import current_app, render_template
from brainzutils.mail import send_mail
from datetime import datetime, timezone, timedelta
TIME_TO_CONSIDER_STATS_AS_OLD = 12 # hours
def is_new_user_stats_batch():
""" Returns True if this batch of user stats is new, False otherwise
User stats come in as multiple rabbitmq messages. We only wish to send an email once per batch.
So, we check the database and see if the difference between the last time stats were updated
and right now is greater than 12 hours.
"""
return datetime.now(timezone.utc) - db_stats.get_timestamp_for_last_user_stats_update() > timedelta(hours=TIME_TO_CONSIDER_STATS_AS_OLD)
def notify_user_stats_update():
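    """ Send an email notifying the team that new user stats are being
    written into the DB (skipped in testing mode).
    """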
if not current_app.config['TESTING']:
send_mail(
subject="New user stats are being written into the DB - ListenBrainz",
text=render_template('emails/user_stats_notification.txt', now=str(datetime.utcnow())),
recipients=['[email protected]'],
from_name='ListenBrainz',
from_addr='noreply@'+current_app.config['MAIL_FROM_DOMAIN']
)
def handle_user_artist(data):
""" Take artist stats for a user and save it in the database.
"""
musicbrainz_id = data['musicbrainz_id']
user = db_user.get_by_mb_id(musicbrainz_id)
if not user:
return
# send a notification if this is a new batch of stats
if is_new_user_stats_batch():
notify_user_stats_update()
artists = data['artist_stats']
artist_count = data['artist_count']
db_stats.insert_user_stats(user['id'], artists, {}, {}, artist_count)
def handle_dump_imported(data):
""" Process the response that the cluster sends after importing a new full dump
We don't really need to _do_ anything, just send an email over for observability.
"""
if current_app.config['TESTING']:
return
dump_name = data['imported_dump']
import_completion_time = data['time']
send_mail(
subject='A full data dump has been imported into the Spark cluster',
text=render_template('emails/dump_import_notification.txt', dump_name=dump_name, time=import_completion_time),
recipients=['[email protected]'],
from_name='ListenBrainz',
from_addr='noreply@'+current_app.config['MAIL_FROM_DOMAIN'],
)
| 1 | 15,941 | brackets around if conditions isn't really pythonic. | metabrainz-listenbrainz-server | py |
@@ -396,9 +396,6 @@ func (h *Impl) Stop() {
h.namespaceCache.Stop()
h.membershipMonitor.Stop()
h.ringpopChannel.Close()
- if err := h.grpcListener.Close(); err != nil {
- h.logger.WithTags(tag.Error(err)).Error("failed to close gRPC listener")
- }
h.runtimeMetricsReporter.Stop()
h.persistenceBean.Close()
h.visibilityMgr.Close() | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package resource
import (
"math/rand"
"net"
"os"
"sync/atomic"
"time"
"github.com/uber-go/tally"
"github.com/uber/tchannel-go"
sdkclient "go.temporal.io/sdk/client"
"go.temporal.io/server/client"
"go.temporal.io/server/client/admin"
"go.temporal.io/server/client/frontend"
"go.temporal.io/server/client/history"
"go.temporal.io/server/client/matching"
"go.temporal.io/server/common"
"go.temporal.io/server/common/archiver"
"go.temporal.io/server/common/archiver/provider"
"go.temporal.io/server/common/cache"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/loggerimpl"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/membership"
"go.temporal.io/server/common/messaging"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/persistence"
persistenceClient "go.temporal.io/server/common/persistence/client"
"go.temporal.io/server/common/service/dynamicconfig"
)
type (
// VisibilityManagerInitializer is the function each service should implement
// for visibility manager initialization
VisibilityManagerInitializer func(
persistenceBean persistenceClient.Bean,
logger log.Logger,
) (persistence.VisibilityManager, error)
// Impl contains all common resources shared across frontend / matching / history / worker
Impl struct {
status int32
// static infos
numShards int
serviceName string
hostName string
hostInfo *membership.HostInfo
metricsScope tally.Scope
clusterMetadata cluster.Metadata
// other common resources
namespaceCache cache.NamespaceCache
timeSource clock.TimeSource
payloadSerializer persistence.PayloadSerializer
metricsClient metrics.Client
messagingClient messaging.Client
archivalMetadata archiver.ArchivalMetadata
archiverProvider provider.ArchiverProvider
// membership infos
membershipMonitor membership.Monitor
frontendServiceResolver membership.ServiceResolver
matchingServiceResolver membership.ServiceResolver
historyServiceResolver membership.ServiceResolver
workerServiceResolver membership.ServiceResolver
// internal services clients
sdkClient sdkclient.Client
frontendRawClient frontend.Client
frontendClient frontend.Client
matchingRawClient matching.Client
matchingClient matching.Client
historyRawClient history.Client
historyClient history.Client
clientBean client.Bean
// persistence clients
persistenceBean persistenceClient.Bean
visibilityMgr persistence.VisibilityManager
// loggers
logger log.Logger
throttledLogger log.Logger
// for registering handlers
grpcListener net.Listener
// for ringpop listener
ringpopChannel *tchannel.Channel
// internal vars
runtimeMetricsReporter *metrics.RuntimeMetricsReporter
rpcFactory common.RPCFactory
}
)
var _ Resource = (*Impl)(nil)
// New create a new resource containing common dependencies
func New(
params *BootstrapParams,
serviceName string,
persistenceMaxQPS dynamicconfig.IntPropertyFn,
persistenceGlobalMaxQPS dynamicconfig.IntPropertyFn,
throttledLoggerMaxRPS dynamicconfig.IntPropertyFn,
visibilityManagerInitializer VisibilityManagerInitializer,
) (impl *Impl, retError error) {
logger := params.Logger.WithTags(tag.Service(serviceName))
throttledLogger := loggerimpl.NewThrottledLogger(logger, throttledLoggerMaxRPS)
numShards := params.PersistenceConfig.NumHistoryShards
hostName, err := os.Hostname()
if err != nil {
return nil, err
}
grpcListener := params.RPCFactory.GetGRPCListener()
ringpopChannel := params.RPCFactory.GetRingpopChannel()
persistenceBean, err := persistenceClient.NewBeanFromFactory(persistenceClient.NewFactory(
¶ms.PersistenceConfig,
func(...dynamicconfig.FilterOption) int {
if persistenceGlobalMaxQPS() > 0 {
// TODO: We have a bootstrap issue to correctly find memberCount. Membership relies on
// persistence to bootstrap membership ring, so we cannot have persistence rely on membership
// as it will cause circular dependency.
// ringSize, err := membershipMonitor.GetMemberCount(serviceName)
// if err == nil && ringSize > 0 {
// avgQuota := common.MaxInt(persistenceGlobalMaxQPS()/ringSize, 1)
// return common.MinInt(avgQuota, persistenceMaxQPS())
// }
}
return persistenceMaxQPS()
},
params.AbstractDatastoreFactory,
params.ClusterMetadata.GetCurrentClusterName(),
params.MetricsClient,
logger,
))
if err != nil {
return nil, err
}
membershipFactory, err := params.MembershipFactoryInitializer(persistenceBean, logger)
if err != nil {
return nil, err
}
membershipMonitor, err := membershipFactory.GetMembershipMonitor()
if err != nil {
return nil, err
}
dynamicCollection := dynamicconfig.NewCollection(params.DynamicConfig, logger)
clientBean, err := client.NewClientBean(
client.NewRPCClientFactory(
params.RPCFactory,
membershipMonitor,
params.MetricsClient,
dynamicCollection,
numShards,
logger,
),
params.ClusterMetadata,
)
if err != nil {
return nil, err
}
frontendServiceResolver, err := membershipMonitor.GetResolver(common.FrontendServiceName)
if err != nil {
return nil, err
}
matchingServiceResolver, err := membershipMonitor.GetResolver(common.MatchingServiceName)
if err != nil {
return nil, err
}
historyServiceResolver, err := membershipMonitor.GetResolver(common.HistoryServiceName)
if err != nil {
return nil, err
}
workerServiceResolver, err := membershipMonitor.GetResolver(common.WorkerServiceName)
if err != nil {
return nil, err
}
visibilityMgr, err := visibilityManagerInitializer(
persistenceBean,
logger,
)
if err != nil {
return nil, err
}
namespaceCache := cache.NewNamespaceCache(
persistenceBean.GetMetadataManager(),
params.ClusterMetadata,
params.MetricsClient,
logger,
)
frontendRawClient := clientBean.GetFrontendClient()
frontendClient := frontend.NewRetryableClient(
frontendRawClient,
common.CreateFrontendServiceRetryPolicy(),
common.IsWhitelistServiceTransientError,
)
matchingRawClient, err := clientBean.GetMatchingClient(namespaceCache.GetNamespaceName)
if err != nil {
return nil, err
}
matchingClient := matching.NewRetryableClient(
matchingRawClient,
common.CreateMatchingServiceRetryPolicy(),
common.IsWhitelistServiceTransientError,
)
historyRawClient := clientBean.GetHistoryClient()
historyClient := history.NewRetryableClient(
historyRawClient,
common.CreateHistoryServiceRetryPolicy(),
common.IsWhitelistServiceTransientError,
)
historyArchiverBootstrapContainer := &archiver.HistoryBootstrapContainer{
HistoryV2Manager: persistenceBean.GetHistoryManager(),
Logger: logger,
MetricsClient: params.MetricsClient,
ClusterMetadata: params.ClusterMetadata,
NamespaceCache: namespaceCache,
}
visibilityArchiverBootstrapContainer := &archiver.VisibilityBootstrapContainer{
Logger: logger,
MetricsClient: params.MetricsClient,
ClusterMetadata: params.ClusterMetadata,
NamespaceCache: namespaceCache,
}
if err := params.ArchiverProvider.RegisterBootstrapContainer(
serviceName,
historyArchiverBootstrapContainer,
visibilityArchiverBootstrapContainer,
); err != nil {
return nil, err
}
impl = &Impl{
status: common.DaemonStatusInitialized,
// static infos
numShards: numShards,
serviceName: params.Name,
hostName: hostName,
metricsScope: params.MetricScope,
clusterMetadata: params.ClusterMetadata,
// other common resources
namespaceCache: namespaceCache,
timeSource: clock.NewRealTimeSource(),
payloadSerializer: persistence.NewPayloadSerializer(),
metricsClient: params.MetricsClient,
messagingClient: params.MessagingClient,
archivalMetadata: params.ArchivalMetadata,
archiverProvider: params.ArchiverProvider,
// membership infos
membershipMonitor: membershipMonitor,
frontendServiceResolver: frontendServiceResolver,
matchingServiceResolver: matchingServiceResolver,
historyServiceResolver: historyServiceResolver,
workerServiceResolver: workerServiceResolver,
// internal services clients
sdkClient: params.PublicClient,
frontendRawClient: frontendRawClient,
frontendClient: frontendClient,
matchingRawClient: matchingRawClient,
matchingClient: matchingClient,
historyRawClient: historyRawClient,
historyClient: historyClient,
clientBean: clientBean,
// persistence clients
persistenceBean: persistenceBean,
visibilityMgr: visibilityMgr,
// loggers
logger: logger,
throttledLogger: throttledLogger,
// for registering grpc handlers
grpcListener: grpcListener,
// for ringpop listener
ringpopChannel: ringpopChannel,
// internal vars
runtimeMetricsReporter: metrics.NewRuntimeMetricsReporter(
params.MetricScope,
time.Minute,
logger,
params.InstanceID,
),
rpcFactory: params.RPCFactory,
}
return impl, nil
}
// Start start all resources
func (h *Impl) Start() {
if !atomic.CompareAndSwapInt32(
&h.status,
common.DaemonStatusInitialized,
common.DaemonStatusStarted,
) {
return
}
h.metricsScope.Counter(metrics.RestartCount).Inc(1)
h.runtimeMetricsReporter.Start()
h.membershipMonitor.Start()
h.namespaceCache.Start()
hostInfo, err := h.membershipMonitor.WhoAmI()
if err != nil {
		h.logger.WithTags(tag.Error(err)).Fatal("failed to get host info from membership monitor")
}
h.hostInfo = hostInfo
// The service is now started up
h.logger.Info("Service resources started", tag.Address(hostInfo.GetAddress()))
// seed the random generator once for this service
rand.Seed(time.Now().UTC().UnixNano())
}
// Stop stops all resources
func (h *Impl) Stop() {
if !atomic.CompareAndSwapInt32(
&h.status,
common.DaemonStatusStarted,
common.DaemonStatusStopped,
) {
return
}
h.namespaceCache.Stop()
h.membershipMonitor.Stop()
h.ringpopChannel.Close()
if err := h.grpcListener.Close(); err != nil {
h.logger.WithTags(tag.Error(err)).Error("failed to close gRPC listener")
}
h.runtimeMetricsReporter.Stop()
h.persistenceBean.Close()
h.visibilityMgr.Close()
}
// GetServiceName return service name
func (h *Impl) GetServiceName() string {
return h.serviceName
}
// GetHostName return host name
func (h *Impl) GetHostName() string {
return h.hostName
}
// GetHostInfo return host info
func (h *Impl) GetHostInfo() *membership.HostInfo {
return h.hostInfo
}
// GetClusterMetadata return cluster metadata
func (h *Impl) GetClusterMetadata() cluster.Metadata {
return h.clusterMetadata
}
// other common resources
// GetNamespaceCache return namespace cache
func (h *Impl) GetNamespaceCache() cache.NamespaceCache {
return h.namespaceCache
}
// GetTimeSource return time source
func (h *Impl) GetTimeSource() clock.TimeSource {
return h.timeSource
}
// GetPayloadSerializer return binary payload serializer
func (h *Impl) GetPayloadSerializer() persistence.PayloadSerializer {
return h.payloadSerializer
}
// GetMetricsClient return metrics client
func (h *Impl) GetMetricsClient() metrics.Client {
return h.metricsClient
}
// GetMessagingClient return messaging client
func (h *Impl) GetMessagingClient() messaging.Client {
return h.messagingClient
}
// GetArchivalMetadata return archival metadata
func (h *Impl) GetArchivalMetadata() archiver.ArchivalMetadata {
return h.archivalMetadata
}
// GetArchiverProvider return archival provider
func (h *Impl) GetArchiverProvider() provider.ArchiverProvider {
return h.archiverProvider
}
// membership infos
// GetMembershipMonitor return the membership monitor
func (h *Impl) GetMembershipMonitor() membership.Monitor {
return h.membershipMonitor
}
// GetFrontendServiceResolver return frontend service resolver
func (h *Impl) GetFrontendServiceResolver() membership.ServiceResolver {
return h.frontendServiceResolver
}
// GetMatchingServiceResolver return matching service resolver
func (h *Impl) GetMatchingServiceResolver() membership.ServiceResolver {
return h.matchingServiceResolver
}
// GetHistoryServiceResolver return history service resolver
func (h *Impl) GetHistoryServiceResolver() membership.ServiceResolver {
return h.historyServiceResolver
}
// GetWorkerServiceResolver return worker service resolver
func (h *Impl) GetWorkerServiceResolver() membership.ServiceResolver {
return h.workerServiceResolver
}
// internal services clients
// GetSDKClient return sdk client
func (h *Impl) GetSDKClient() sdkclient.Client {
return h.sdkClient
}
// GetFrontendRawClient return frontend client without retry policy
func (h *Impl) GetFrontendRawClient() frontend.Client {
return h.frontendRawClient
}
// GetFrontendClient return frontend client with retry policy
func (h *Impl) GetFrontendClient() frontend.Client {
return h.frontendClient
}
// GetMatchingRawClient return matching client without retry policy
func (h *Impl) GetMatchingRawClient() matching.Client {
return h.matchingRawClient
}
// GetMatchingClient return matching client with retry policy
func (h *Impl) GetMatchingClient() matching.Client {
return h.matchingClient
}
// GetHistoryRawClient return history client without retry policy
func (h *Impl) GetHistoryRawClient() history.Client {
return h.historyRawClient
}
// GetHistoryClient return history client with retry policy
func (h *Impl) GetHistoryClient() history.Client {
return h.historyClient
}
// GetRemoteAdminClient return remote admin client for given cluster name
func (h *Impl) GetRemoteAdminClient(
cluster string,
) admin.Client {
return h.clientBean.GetRemoteAdminClient(cluster)
}
// GetRemoteFrontendClient return remote frontend client for given cluster name
func (h *Impl) GetRemoteFrontendClient(
cluster string,
) frontend.Client {
return h.clientBean.GetRemoteFrontendClient(cluster)
}
// GetClientBean return RPC client bean
func (h *Impl) GetClientBean() client.Bean {
return h.clientBean
}
// persistence clients
// GetMetadataManager return metadata manager
func (h *Impl) GetMetadataManager() persistence.MetadataManager {
return h.persistenceBean.GetMetadataManager()
}
// GetClusterMetadataManager return metadata manager
func (h *Impl) GetClusterMetadataManager() persistence.ClusterMetadataManager {
return h.persistenceBean.GetClusterMetadataManager()
}
// GetTaskManager return task manager
func (h *Impl) GetTaskManager() persistence.TaskManager {
return h.persistenceBean.GetTaskManager()
}
// GetVisibilityManager return visibility manager
func (h *Impl) GetVisibilityManager() persistence.VisibilityManager {
return h.visibilityMgr
}
// GetNamespaceReplicationQueue return namespace replication queue
func (h *Impl) GetNamespaceReplicationQueue() persistence.NamespaceReplicationQueue {
return h.persistenceBean.GetNamespaceReplicationQueue()
}
// GetShardManager return shard manager
func (h *Impl) GetShardManager() persistence.ShardManager {
return h.persistenceBean.GetShardManager()
}
// GetHistoryManager return history manager
func (h *Impl) GetHistoryManager() persistence.HistoryManager {
return h.persistenceBean.GetHistoryManager()
}
// GetExecutionManager return execution manager for given shard ID
func (h *Impl) GetExecutionManager(
shardID int,
) (persistence.ExecutionManager, error) {
return h.persistenceBean.GetExecutionManager(shardID)
}
// GetPersistenceBean return persistence bean
func (h *Impl) GetPersistenceBean() persistenceClient.Bean {
return h.persistenceBean
}
// loggers
// GetLogger return logger
func (h *Impl) GetLogger() log.Logger {
return h.logger
}
// GetThrottledLogger return throttled logger
func (h *Impl) GetThrottledLogger() log.Logger {
return h.throttledLogger
}
// GetGRPCListener return GRPC listener, used for registering handlers
func (h *Impl) GetGRPCListener() net.Listener {
return h.grpcListener
}
| 1 | 10,036 | It turn out that when we close server it closed underlying listener itself, so this line always generated and error. | temporalio-temporal | go |
@@ -38,13 +38,13 @@ function createObjects(user) {
error: err => console.log(err)
},
schema: [{
- name: 'Dog',
- primaryKey: '_id',
+ name: "Dog",
+ primaryKey: "_id",
properties: {
- _id: 'objectId?',
- breed: 'string?',
- name: 'string',
- realm_id: 'string?',
+ _id: "objectId?",
+ breed: "string?",
+ name: "string",
+ realm_id: "string?",
}
}]
}; | 1 | /*
This script creates 3 new objects into a new realm. These are objects are validated to exists by the download api tests.
*/
'use strict';
console.log("download-api-helper started");
const appId = process.argv[2];
const appUrl = process.argv[3];
const partition = process.argv[4];
const realmModule = process.argv[5];
function trySetElectronVersion() {
if (!process.versions || !process.env.REALM_ELECTRON_VERSION) {
return;
}
const descriptor = Object.getOwnPropertyDescriptor(process.versions, "electron");
if (descriptor.writable) {
process.versions.electron = process.env.REALM_ELECTRON_VERSION;
}
if (descriptor.set) {
descriptor.set(process.env.REALM_ELECTRON_VERSION);
}
}
// Ensure node-pre-gyp uses the correct binary
trySetElectronVersion();
const Realm = require(realmModule);
const { ObjectId } = require("bson");
function createObjects(user) {
const config = {
sync: {
user: user,
partitionValue: partition,
error: err => console.log(err)
},
schema: [{
name: 'Dog',
primaryKey: '_id',
properties: {
_id: 'objectId?',
breed: 'string?',
name: 'string',
realm_id: 'string?',
}
}]
};
const realm = new Realm(config);
realm.write(() => {
for (let i = 1; i <= 3; i++) {
realm.create('Dog', { "_id": new ObjectId(), name: `Lassy ${i}` });
}
});
console.log("Dogs count " + realm.objects('Dog').length);
let session = realm.syncSession;
return new Promise((resolve, reject) => {
let callback = (transferred, total) => {
if (transferred === total) {
session.removeProgressNotification(callback);
resolve(realm);
}
}
session.addProgressNotification('upload', 'forCurrentlyOutstandingWork', callback);
});
}
const credentials = Realm.Credentials.anonymous();
const appConfig = {
id: appId,
url: appUrl,
timeout: 1000,
app: {
name: "default",
version: '0'
},
};
let app = new Realm.App(appConfig);
app.logIn(credentials)
.catch((error) => {
const loginError = JSON.stringify(error);
console.error(`download-api-helper failed:\n User login error:\n${loginError}`);
process.exit(-2);
})
.then((user) => createObjects(user))
.then((realm) => { realm.close(); process.exit(0); });
| 1 | 19,299 | Looks like this is for debugging? Maybe just remove. | realm-realm-js | js |
@@ -187,7 +187,10 @@ module Mongoid
#
# @since 1.0.0
def exists?
- context.count > 0
+ Mongoid.unit_of_work(disable: :current) do
+ # Don't use count here since Mongo does not use counted b-tree indexes
+ !context.dup.criteria.only(:_id).limit(1).entries.first.nil?
+ end
end
# Extract a single id from the provided criteria. Could be in an $and | 1 | # encoding: utf-8
require "mongoid/criterion/inspection"
require "mongoid/criterion/scoping"
module Mongoid
# The +Criteria+ class is the core object needed in Mongoid to retrieve
# objects from the database. It is a DSL that essentially sets up the
# selector and options arguments that get passed on to a Mongo::Collection
# in the Ruby driver. Each method on the +Criteria+ returns self to they
# can be chained in order to create a readable criterion to be executed
# against the database.
class Criteria
include Enumerable
include Contextual
include Origin::Queryable
include Criterion::Inspection
include Criterion::Scoping
attr_accessor :embedded, :klass
# Returns true if the supplied +Enumerable+ or +Criteria+ is equal to the results
# of this +Criteria+ or the criteria itself.
#
# @note This will force a database load when called if an enumerable is passed.
#
# @param [ Object ] other The other +Enumerable+ or +Criteria+ to compare to.
#
# @return [ true, false ] If the objects are equal.
#
# @since 1.0.0
def ==(other)
return super if other.respond_to?(:selector)
entries == other
end
# Needed to properly get a criteria back as json
#
# @example Get the criteria as json.
# Person.where(:title => "Sir").as_json
#
# @param [ Hash ] options Options to pass through to the serializer.
#
# @return [ String ] The JSON string.
def as_json(options = nil)
entries.as_json(options)
end
# Build a document given the selector and return it.
# Complex criteria, such as $in and $or operations will get ignored.
#
# @example build the document.
# Person.where(:title => "Sir").build
#
# @example Build with selectors getting ignored.
# Person.where(:age.gt => 5).build
#
# @return [ Document ] A non-persisted document.
#
# @since 2.0.0
def build(attrs = {})
create_document(:new, attrs)
end
alias :new :build
# Tells the criteria that the cursor that gets returned needs to be
# cached. This is so multiple iterations don't hit the database multiple
# times, however this is not advisable when working with large data sets
# as the entire results will get stored in memory.
#
# @example Flag the criteria as cached.
# criteria.cache
#
# @return [ Criteria ] The cloned criteria.
def cache
crit = clone
crit.options.merge!(cache: true)
crit
end
# Will return true if the cache option has been set.
#
# @example Is the criteria cached?
# criteria.cached?
#
# @return [ true, false ] If the criteria is flagged as cached.
def cached?
options[:cache] == true
end
# Create a document in the database given the selector and return it.
# Complex criteria, such as $in and $or operations will get ignored.
#
# @example Create the document.
# Person.where(:title => "Sir").create
#
# @example Create with selectors getting ignored.
# Person.where(:age.gt => 5).create
#
# @return [ Document ] A newly created document.
#
# @since 2.0.0.rc.1
def create(attrs = {})
create_document(:create, attrs)
end
# Create a document in the database given the selector and return it.
# Complex criteria, such as $in and $or operations will get ignored.
# If validation fails, an error will be raised.
#
# @example Create the document.
# Person.where(:title => "Sir").create
#
# @example Create with selectors getting ignored.
# Person.where(:age.gt => 5).create
#
# @raise [ Errors::Validations ] on a validation error.
#
# @return [ Document ] A newly created document.
#
# @since 3.0.0
def create!(attrs = {})
create_document(:create!, attrs)
end
# Get the documents from the embedded criteria.
#
# @example Get the documents.
# criteria.documents
#
# @return [ Array<Document> ] The documents.
#
# @since 3.0.0
def documents
@documents ||= []
end
# Set the embedded documents on the criteria.
#
# @example Set the documents.
#
# @param [ Array<Document> ] docs The embedded documents.
#
# @return [ Array<Document> ] The embedded documents.
#
# @since 3.0.0
def documents=(docs)
@documents = docs
end
# Is the criteria for embedded documents?
#
# @example Is the criteria for embedded documents?
# criteria.embedded?
#
# @return [ true, false ] If the criteria is embedded.
#
# @since 3.0.0
def embedded?
!!@embedded
end
# Execute the criteria or raise an error if no documents found.
#
# @example Execute or raise
# criteria.execute_or_raise(id)
#
# @param [ Object ] args The arguments passed.
#
# @raise [ Errors::DocumentNotFound ] If nothing returned.
#
# @return [ Document, Array<Document> ] The document(s).
#
# @since 2.0.0
def execute_or_raise(ids, multi)
result = multiple_from_map_or_db(ids)
check_for_missing_documents!(result, ids)
multi ? result : result.first
end
# Return true if the criteria has some Document or not.
#
# @example Are there any documents for the criteria?
# criteria.exists?
#
# @return [ true, false ] If documents match.
#
# @since 1.0.0
def exists?
context.count > 0
end
# Extract a single id from the provided criteria. Could be in an $and
# query or a straight _id query.
#
# @example Extract the id.
# criteria.extract_id
#
# @return [ Object ] The id.
#
# @since 2.3.0
def extract_id
selector.extract_id
end
# Adds a criterion to the +Criteria+ that specifies additional options
# to be passed to the Ruby driver, in the exact format for the driver.
#
# @example Add extra params to the criteria.
# criteria.extras(:limit => 20, :skip => 40)
#
# @param [ Hash ] extras The extra driver options.
#
# @return [ Criteria ] The cloned criteria.
#
# @since 2.0.0
def extras(extras)
crit = clone
crit.options.merge!(extras)
crit
end
# Get the list of included fields.
#
# @example Get the field list.
# criteria.field_list
#
# @return [ Array<String> ] The fields.
#
# @since 2.0.0
def field_list
if options[:fields]
options[:fields].keys.reject{ |key| key == "_type" }
else
[]
end
end
# Find the matchind document(s) in the criteria for the provided ids.
#
# @example Find by an id.
# criteria.find(Moped::BSON::ObjectId.new)
#
# @example Find by multiple ids.
# criteria.find([ Moped::BSON::ObjectId.new, Moped::BSON::ObjectId.new ])
#
# @param [ Array<Moped::BSON::ObjectId> ] args The ids to search for.
#
# @return [ Array<Document>, Document ] The matching document(s).
#
# @since 1.0.0
def find(*args)
ids = args.__find_args__
raise_invalid if ids.any?(&:nil?)
for_ids(ids).execute_or_raise(ids, args.multi_arged?)
end
# Adds a criterion to the +Criteria+ that specifies an id that must be matched.
#
# @example Add a single id criteria.
# criteria.for_ids([ 1 ])
#
# @example Add multiple id criteria.
# criteria.for_ids([ 1, 2 ])
#
# @param [ Array ] ids The array of ids.
#
# @return [ Criteria ] The cloned criteria.
def for_ids(ids)
ids = mongoize_ids(ids)
method = extract_id ? :all_of : :where
if ids.size > 1
send(method, { _id: { "$in" => ids }})
else
send(method, { _id: ids.first })
end
end
# When freezing a criteria we need to initialize the context first
# otherwise the setting of the context on attempted iteration will raise a
# runtime error.
#
# @example Freeze the criteria.
# criteria.freeze
#
# @return [ Criteria ] The frozen criteria.
#
# @since 2.0.0
def freeze
context and inclusions and super
end
# Get the document from the identity map, and if not found hit the
# database.
#
# @example Get the document from the map or criteria.
# criteria.from_map_or_db
#
# @return [ Document ] The found document.
#
# @since 2.2.1
def from_map_or_db
id = extract_id
id = klass.fields["_id"].mongoize(id) if id
doc = IdentityMap.get(klass, id || selector.except("_type"))
doc && doc.matches?(selector) ? doc : first
end
# Get the documents from the identity map, and if not found hit the
# database.
#
# @example Get the documents from the map or criteria.
# criteria.multiple_from_map_or_db(ids)
#
# @param [ ids ] The searched ids.
#
# @return [ Array<Document> ] The found documents.
def multiple_from_map_or_db(ids)
return entries if embedded?
ids = mongoize_ids(ids)
result = from_identity_map(ids)
ids.empty? ? result : result + from_database(ids)
end
# Initialize the new criteria.
#
# @example Init the new criteria.
# Criteria.new(Band)
#
# @param [ Class ] klass The model class.
#
# @since 1.0.0
def initialize(klass)
@klass = klass
super(klass.aliased_fields, klass.fields)
end
# Eager loads all the provided relations. Will load all the documents
# into the identity map who's ids match based on the extra query for the
# ids.
#
# @note This will only work if Mongoid's identity map is enabled. To do
# so set identity_map_enabled: true in your mongoid.yml
#
# @note This will work for embedded relations that reference another
# collection via belongs_to as well.
#
# @note Eager loading brings all the documents into memory, so there is a
# sweet spot on the performance gains. Internal benchmarks show that
# eager loading becomes slower around 100k documents, but this will
# naturally depend on the specific application.
#
# @example Eager load the provided relations.
# Person.includes(:posts, :game)
#
# @param [ Array<Symbol> ] relations The names of the relations to eager
# load.
#
# @return [ Criteria ] The cloned criteria.
#
# @since 2.2.0
def includes(*relations)
relations.flatten.each do |name|
metadata = klass.reflect_on_association(name)
raise Errors::InvalidIncludes.new(klass, relations) unless metadata
inclusions.push(metadata) unless inclusions.include?(metadata)
end
clone
end
# Get a list of criteria that are to be executed for eager loading.
#
# @example Get the eager loading inclusions.
# Person.includes(:game).inclusions
#
# @return [ Array<Metadata> ] The inclusions.
#
# @since 2.2.0
def inclusions
@inclusions ||= []
end
# Set the inclusions for the criteria.
#
# @example Set the inclusions.
# criteria.inclusions = [ meta ]
#
# @param [ Array<Metadata> ] The inclusions.
#
# @return [ Array<Metadata> ] The new inclusions.
#
# @since 3.0.0
def inclusions=(value)
@inclusions = value
end
# Merges another object with this +Criteria+ and returns a new criteria.
# The other object may be a +Criteria+ or a +Hash+. This is used to
# combine multiple scopes together, where a chained scope situation
# may be desired.
#
# @example Merge the criteria with another criteria.
# criteri.merge(other_criteria)
#
# @param [ Criteria ] other The other criterion to merge with.
#
# @return [ Criteria ] A cloned self.
def merge(other)
crit = clone
crit.merge!(other)
crit
end
# Merge the other criteria into this one.
#
# @example Merge another criteria into this criteria.
# criteria.merge(Person.where(name: "bob"))
#
# @param [ Criteria ] other The criteria to merge in.
#
# @return [ Criteria ] The merged criteria.
#
# @since 3.0.0
def merge!(other)
criteria = other.to_criteria
selector.merge!(criteria.selector)
options.merge!(criteria.options)
self.documents = criteria.documents.dup unless criteria.documents.empty?
self.scoping_options = criteria.scoping_options
self.inclusions = (inclusions + criteria.inclusions.dup).uniq
self
end
# Overriden to include _type in the fields.
#
# @example Limit the fields returned from the database.
# Band.only(:name)
#
# @param [ Array<Symbol> ] args The names of the fields.
#
# @return [ Criteria ] The cloned criteria.
#
# @since 1.0.0
def only(*args)
return clone if args.empty?
args = args.flatten
if klass.hereditary?
super(*args.push(:_type))
else
super(*args)
end
end
# Returns true if criteria responds to the given method.
#
# @example Does the criteria respond to the method?
# crtiteria.respond_to?(:each)
#
# @param [ Symbol ] name The name of the class method on the +Document+.
# @param [ true, false ] include_private Whether to include privates.
#
# @return [ true, false ] If the criteria responds to the method.
def respond_to?(name, include_private = false)
super || klass.respond_to?(name) || entries.respond_to?(name, include_private)
end
alias :to_ary :to_a
# Convenience for objects that want to be merged into a criteria.
#
# @example Convert to a criteria.
# criteria.to_criteria
#
# @return [ Criteria ] self.
#
# @since 3.0.0
def to_criteria
self
end
# Convert the criteria to a proc.
#
# @example Convert the criteria to a proc.
# criteria.to_proc
#
# @return [ Proc ] The wrapped criteria.
#
# @since 3.0.0
def to_proc
->{ self }
end
# Adds a criterion to the +Criteria+ that specifies a type or an Array of
# types that must be matched.
#
# @example Match only specific models.
# criteria.type('Browser')
# criteria.type(['Firefox', 'Browser'])
#
# @param [ Array<String> ] types The types to match against.
#
# @return [ Criteria ] The cloned criteria.
def type(types)
any_in(_type: Array(types))
end
# This is the general entry point for most MongoDB queries. This either
# creates a standard field: value selection, and expanded selection with
# the use of hash methods, or a $where selection if a string is provided.
#
# @example Add a standard selection.
# criteria.where(name: "syd")
#
# @example Add a javascript selection.
# criteria.where("this.name == 'syd'")
#
# @param [ String, Hash ] criterion The javascript or standard selection.
#
# @raise [ UnsupportedJavascript ] If provided a string and the criteria
# is embedded.
#
# @return [ Criteria ] The cloned selectable.
#
# @since 1.0.0
def where(expression)
if expression.is_a?(::String) && embedded?
raise Errors::UnsupportedJavascript.new(klass, expression)
end
super
end
private
# Are documents in the query missing, and are we configured to raise an
# error?
#
# @api private
#
# @example Check for missing documents.
# criteria.check_for_missing_documents!([], [ 1 ])
#
# @param [ Array<Document> ] result The result.
# @param [ Array<Object> ] ids The ids.
#
# @raise [ Errors::DocumentNotFound ] If none are found and raising an
# error.
#
# @since 3.0.0
def check_for_missing_documents!(result, ids)
if (result.size < ids.size) && Mongoid.raise_not_found_error
raise Errors::DocumentNotFound.new(klass, ids, ids - result.map(&:_id))
end
end
# Create a document given the provided method and attributes from the
# existing selector.
#
# @api private
#
# @example Create a new document.
# criteria.create_document(:new, {})
#
# @param [ Symbol ] method Either :new or :create.
# @param [ Hash ] attrs Additional attributes to use.
#
# @return [ Document ] The new or saved document.
#
# @since 3.0.0
def create_document(method, attrs = {})
klass.__send__(method,
selector.reduce(attrs) do |hash, (key, value)|
unless key.to_s =~ /\$/ || value.is_a?(Hash)
hash[key] = value
end
hash
end
)
end
# Clone or dup the current +Criteria+. This will return a new criteria with
# the selector, options, klass, embedded options, etc intact.
#
# @api private
#
# @example Clone a criteria.
# criteria.clone
#
# @example Dup a criteria.
# criteria.dup
#
# @param [ Criteria ] other The criteria getting cloned.
#
# @return [ nil ] nil.
#
# @since 1.0.0
def initialize_copy(other)
@selector = other.selector.dup
@options = other.options.dup
@inclusions = other.inclusions.dup
@scoping_options = other.scoping_options
@documents = other.documents.dup
@context = nil
end
# Get documents from the database only.
#
# @api private
#
# @example Get documents from the database.
# criteria.from_database(ids)
#
# @param [ Array<Object> ] ids The ids to fetch with.
#
# @return [ Array<Document> ] The matching documents.
#
# @since 3.0.0
def from_database(ids)
(ids.size > 1 ? any_in(id: ids) : where(id: ids.first)).entries
end
# Get documents from the identity map only.
#
# @api private
#
# @example Get documents from the identity map.
# criteria.from_identity_map(ids)
#
# @param [ Array<Object> ] ids The ids to fetch with.
#
# @return [ Array<Document> ] The matching documents.
#
# @since 3.0.0
def from_identity_map(ids)
result = []
ids.reject! do |id|
doc = IdentityMap.get(klass, id)
doc && doc.matches?(selector) ? result.push(doc) : false
end
result
end
# Used for chaining +Criteria+ scopes together in the for of class methods
# on the +Document+ the criteria is for.
#
# @example Handle method missing.
# criteria.method_missing(:name)
#
# @param [ Symbol ] name The method name.
# @param [ Array ] args The arguments.
#
# @return [ Object ] The result of the method call.
#
# @since 1.0.0
def method_missing(name, *args, &block)
if klass.respond_to?(name)
klass.send(:with_scope, self) do
klass.send(name, *args, &block)
end
else
return entries.send(name, *args)
end
end
# Convert all the ids to their proper types.
#
# @api private
#
# @example Convert the ids.
# criteria.mongoize_ids(ids)
#
# @param [ Array<Object> ] ids The ids to convert.
#
# @return [ Array<Object> ] The converted ids.
#
# @since 3.0.0
def mongoize_ids(ids)
ids.map{ |id| klass.fields["_id"].mongoize(id) }
end
# Convenience method of raising an invalid options error.
#
# @example Raise the error.
# criteria.raise_invalid
#
# @raise [ Errors::InvalidOptions ] The error.
#
# @since 2.0.0
def raise_invalid
raise Errors::InvalidFind.new
end
end
end
| 1 | 9,661 | Same here - this code is duplicated. I think we can just remove the `exists?` method completely from `Criteria` and it should delegate to the context. | mongodb-mongoid | rb |
@@ -150,8 +150,14 @@ void SYCLInternal::initialize(const sycl::queue& q) {
m_maxShmemPerBlock =
d.template get_info<sycl::info::device::local_mem_size>();
- m_indirectKernelMem.reset(*m_queue, m_instance_id);
+
m_indirectReducerMem.reset(*m_queue, m_instance_id);
+ for (auto& usm_mem : m_indirectKernelMem) {
+ usm_mem.reset(*m_queue, m_instance_id);
+ // TODO 0x1440= 5184, arbitrary, larger than largest encountered kernel.
+ usm_mem.reserve(0x1440);
+ }
+
} else {
std::ostringstream msg;
msg << "Kokkos::Experimental::SYCL::initialize(...) FAILED"; | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Core.hpp> //kokkos_malloc
namespace Kokkos {
namespace Experimental {
namespace Impl {
std::vector<std::optional<sycl::queue>*> SYCLInternal::all_queues;
std::mutex SYCLInternal::mutex;
SYCLInternal::~SYCLInternal() {
if (!was_finalized || m_scratchSpace || m_scratchFlags ||
m_scratchConcurrentBitset) {
std::cerr << "Kokkos::Experimental::SYCL ERROR: Failed to call "
"Kokkos::Experimental::SYCL::finalize()"
<< std::endl;
std::cerr.flush();
}
}
int SYCLInternal::verify_is_initialized(const char* const label) const {
if (!is_initialized()) {
std::cerr << "Kokkos::Experimental::SYCL::" << label
<< " : ERROR device not initialized" << std::endl;
}
return is_initialized();
}
SYCLInternal& SYCLInternal::singleton() {
static SYCLInternal self;
return self;
}
void SYCLInternal::initialize(const sycl::device& d) {
auto exception_handler = [](sycl::exception_list exceptions) {
bool asynchronous_error = false;
for (std::exception_ptr const& e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const& e) {
std::cerr << e.what() << '\n';
asynchronous_error = true;
}
}
if (asynchronous_error)
Kokkos::Impl::throw_runtime_exception(
"There was an asynchronous SYCL error!\n");
};
// FIXME_SYCL using an in-order queue here should not be necessary since we
// are using submit_barrier for managing kernel dependencies but this seems to
// be required as a hot fix for now.
initialize(
sycl::queue{d, exception_handler, sycl::property::queue::in_order()});
}
// FIXME_SYCL
void SYCLInternal::initialize(const sycl::queue& q) {
if (was_finalized)
Kokkos::abort("Calling SYCL::initialize after SYCL::finalize is illegal\n");
if (is_initialized()) return;
if (!HostSpace::execution_space::impl_is_initialized()) {
const std::string msg(
"SYCL::initialize ERROR : HostSpace::execution_space is not "
"initialized");
Kokkos::Impl::throw_runtime_exception(msg);
}
const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;
const bool ok_dev = true;
if (ok_init && ok_dev) {
m_queue = q;
// guard pushing to all_queues
{
std::scoped_lock lock(mutex);
all_queues.push_back(&m_queue);
}
const sycl::device& d = m_queue->get_device();
m_maxWorkgroupSize =
d.template get_info<sycl::info::device::max_work_group_size>();
// FIXME_SYCL this should give the correct value for NVIDIA GPUs
m_maxConcurrency =
m_maxWorkgroupSize * 2 *
d.template get_info<sycl::info::device::max_compute_units>();
// Setup concurent bitset for obtaining unique tokens from within an
// executing kernel.
{
const int32_t buffer_bound =
Kokkos::Impl::concurrent_bitset::buffer_bound(m_maxConcurrency);
using Record = Kokkos::Impl::SharedAllocationRecord<
Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
Record* const r =
Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
"Kokkos::Experimental::SYCL::InternalScratchBitset",
sizeof(uint32_t) * buffer_bound);
Record::increment(r);
m_scratchConcurrentBitset = reinterpret_cast<uint32_t*>(r->data());
auto event = m_queue->memset(m_scratchConcurrentBitset, 0,
sizeof(uint32_t) * buffer_bound);
fence(event,
"Kokkos::Experimental::SYCLInternal::initialize: fence after "
"initializing m_scratchConcurrentBitset",
m_instance_id);
}
m_maxShmemPerBlock =
d.template get_info<sycl::info::device::local_mem_size>();
m_indirectKernelMem.reset(*m_queue, m_instance_id);
m_indirectReducerMem.reset(*m_queue, m_instance_id);
} else {
std::ostringstream msg;
msg << "Kokkos::Experimental::SYCL::initialize(...) FAILED";
if (!ok_init) {
msg << " : Already initialized";
}
Kokkos::Impl::throw_runtime_exception(msg.str());
}
m_team_scratch_current_size = 0;
m_team_scratch_ptr = nullptr;
}
void* SYCLInternal::resize_team_scratch_space(std::int64_t bytes,
bool force_shrink) {
if (m_team_scratch_current_size == 0) {
m_team_scratch_current_size = bytes;
m_team_scratch_ptr =
Kokkos::kokkos_malloc<Experimental::SYCLDeviceUSMSpace>(
"Kokkos::Experimental::SYCLDeviceUSMSpace::TeamScratchMemory",
m_team_scratch_current_size);
}
if ((bytes > m_team_scratch_current_size) ||
((bytes < m_team_scratch_current_size) && (force_shrink))) {
m_team_scratch_current_size = bytes;
m_team_scratch_ptr =
Kokkos::kokkos_realloc<Experimental::SYCLDeviceUSMSpace>(
m_team_scratch_ptr, m_team_scratch_current_size);
}
return m_team_scratch_ptr;
}
uint32_t SYCLInternal::impl_get_instance_id() const { return m_instance_id; }
void SYCLInternal::finalize() {
SYCLInternal::fence(*m_queue,
"Kokkos::SYCLInternal::finalize: fence on finalization",
m_instance_id);
was_finalized = true;
using RecordSYCL = Kokkos::Impl::SharedAllocationRecord<SYCLDeviceUSMSpace>;
if (nullptr != m_scratchSpace)
RecordSYCL::decrement(RecordSYCL::get_record(m_scratchSpace));
if (nullptr != m_scratchFlags)
RecordSYCL::decrement(RecordSYCL::get_record(m_scratchFlags));
m_syclDev = -1;
m_scratchSpaceCount = 0;
m_scratchSpace = nullptr;
m_scratchFlagsCount = 0;
m_scratchFlags = nullptr;
RecordSYCL::decrement(RecordSYCL::get_record(m_scratchConcurrentBitset));
m_scratchConcurrentBitset = nullptr;
if (m_team_scratch_current_size > 0)
Kokkos::kokkos_free<Kokkos::Experimental::SYCLDeviceUSMSpace>(
m_team_scratch_ptr);
m_team_scratch_current_size = 0;
m_team_scratch_ptr = nullptr;
m_indirectKernelMem.reset();
m_indirectReducerMem.reset();
// guard erasing from all_queues
{
std::scoped_lock lock(mutex);
all_queues.erase(std::find(all_queues.begin(), all_queues.end(), &m_queue));
}
m_queue.reset();
}
void* SYCLInternal::scratch_space(const std::size_t size) {
const size_type sizeScratchGrain =
sizeof(Kokkos::Experimental::SYCL::size_type);
if (verify_is_initialized("scratch_space") &&
m_scratchSpaceCount * sizeScratchGrain < size) {
m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
using Record = Kokkos::Impl::SharedAllocationRecord<
Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
if (nullptr != m_scratchSpace)
Record::decrement(Record::get_record(m_scratchSpace));
Record* const r =
Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
"Kokkos::Experimental::SYCL::InternalScratchSpace",
(sizeScratchGrain * m_scratchSpaceCount));
Record::increment(r);
m_scratchSpace = reinterpret_cast<size_type*>(r->data());
}
return m_scratchSpace;
}
void* SYCLInternal::scratch_flags(const std::size_t size) {
const size_type sizeScratchGrain =
sizeof(Kokkos::Experimental::SYCL::size_type);
if (verify_is_initialized("scratch_flags") &&
m_scratchFlagsCount * sizeScratchGrain < size) {
m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
using Record = Kokkos::Impl::SharedAllocationRecord<
Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
if (nullptr != m_scratchFlags)
Record::decrement(Record::get_record(m_scratchFlags));
Record* const r =
Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
"Kokkos::Experimental::SYCL::InternalScratchFlags",
(sizeScratchGrain * m_scratchFlagsCount));
Record::increment(r);
m_scratchFlags = reinterpret_cast<size_type*>(r->data());
}
m_queue->memset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain);
fence(*m_queue,
"Kokkos::Experimental::SYCLInternal::scratch_flags fence after "
"initializing m_scratchFlags",
m_instance_id);
return m_scratchFlags;
}
template <typename WAT>
void SYCLInternal::fence_helper(WAT& wat, const std::string& name,
uint32_t instance_id) {
Kokkos::Tools::Experimental::Impl::profile_fence_event<
Kokkos::Experimental::SYCL>(
name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{instance_id},
[&]() {
try {
wat.wait_and_throw();
} catch (sycl::exception const& e) {
Kokkos::Impl::throw_runtime_exception(
std::string("There was a synchronous SYCL error:\n") += e.what());
}
});
}
template void SYCLInternal::fence_helper<sycl::queue>(sycl::queue&,
const std::string&,
uint32_t);
template void SYCLInternal::fence_helper<sycl::event>(sycl::event&,
const std::string&,
uint32_t);
template <sycl::usm::alloc Kind>
size_t SYCLInternal::USMObjectMem<Kind>::reserve(size_t n) {
assert(m_q);
if (m_capacity < n) {
using Record = Kokkos::Impl::SharedAllocationRecord<AllocationSpace, void>;
// First free what we have (in case malloc can reuse it)
if (m_data) Record::decrement(Record::get_record(m_data));
Record* const r = Record::allocate(
AllocationSpace(*m_q), "Kokkos::Experimental::SYCL::USMObjectMem", n);
Record::increment(r);
m_data = r->data();
m_capacity = n;
}
return m_capacity;
}
template <sycl::usm::alloc Kind>
void SYCLInternal::USMObjectMem<Kind>::reset() {
if (m_data) {
// This implies a fence since this class is not copyable
// and deallocating implies a fence across all registered queues.
using Record = Kokkos::Impl::SharedAllocationRecord<AllocationSpace, void>;
Record::decrement(Record::get_record(m_data));
m_capacity = 0;
m_data = nullptr;
}
m_q.reset();
}
template class SYCLInternal::USMObjectMem<sycl::usm::alloc::shared>;
template class SYCLInternal::USMObjectMem<sycl::usm::alloc::device>;
template class SYCLInternal::USMObjectMem<sycl::usm::alloc::host>;
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
| 1 | 32,241 | I'm not quite sure that we preallocate memory. Do you have a good reason for that? | kokkos-kokkos | cpp |
@@ -731,6 +731,17 @@ func (s *Server) checkAuth(c *client) bool {
}
}
+// Check that number of clients is below Max connection setting.
+func (s *Server) checkMaxConn(c *client) bool {
+ if c.typ == CLIENT {
+ s.mu.Lock()
+ ok := len(s.clients) <= s.opts.MaxConn
+ s.mu.Unlock()
+ return ok
+ }
+ return true
+}
+
// Remove a client or route from our internal accounting.
func (s *Server) removeClient(c *client) {
var rID string | 1 | // Copyright 2012-2016 Apcera Inc. All rights reserved.
package server
import (
"bufio"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"runtime"
"strconv"
"sync"
"time"
// Allow dynamic profiling.
"github.com/nats-io/gnatsd/util"
_ "net/http/pprof"
)
// Info is the information sent to clients to help them understand information
// about this server.
type Info struct {
ID string `json:"server_id"`
Version string `json:"version"`
GoVersion string `json:"go"`
Host string `json:"host"`
Port int `json:"port"`
AuthRequired bool `json:"auth_required"`
SSLRequired bool `json:"ssl_required"` // DEPRECATED: ssl json used for older clients
TLSRequired bool `json:"tls_required"`
TLSVerify bool `json:"tls_verify"`
MaxPayload int `json:"max_payload"`
IP string `json:"ip,omitempty"`
ClientConnectURLs []string `json:"connect_urls,omitempty"` // Contains URLs a client can connect to.
// Used internally for quick look-ups.
clientConnectURLs map[string]struct{}
}
// Server is our main struct.
type Server struct {
gcid uint64
grid uint64
stats
mu sync.Mutex
info Info
infoJSON []byte
sl *Sublist
opts *Options
cAuth Auth
rAuth Auth
trace bool
debug bool
running bool
listener net.Listener
clients map[uint64]*client
routes map[uint64]*client
remotes map[string]*client
totalClients uint64
done chan bool
start time.Time
http net.Listener
httpReqStats map[string]uint64
routeListener net.Listener
routeInfo Info
routeInfoJSON []byte
rcQuit chan bool
grMu sync.Mutex
grTmpClients map[uint64]*client
grRunning bool
grWG sync.WaitGroup // to wait on various go routines
cproto int64 // number of clients supporting async INFO
}
// Make sure all are 64bits for atomic use
type stats struct {
inMsgs int64
outMsgs int64
inBytes int64
outBytes int64
slowConsumers int64
}
// New will setup a new server struct after parsing the options.
func New(opts *Options) *Server {
processOptions(opts)
// Process TLS options, including whether we require client certificates.
tlsReq := opts.TLSConfig != nil
verify := (tlsReq && opts.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert)
info := Info{
ID: genID(),
Version: VERSION,
GoVersion: runtime.Version(),
Host: opts.Host,
Port: opts.Port,
AuthRequired: false,
TLSRequired: tlsReq,
SSLRequired: tlsReq,
TLSVerify: verify,
MaxPayload: opts.MaxPayload,
clientConnectURLs: make(map[string]struct{}),
}
s := &Server{
info: info,
sl: NewSublist(),
opts: opts,
debug: opts.Debug,
trace: opts.Trace,
done: make(chan bool, 1),
start: time.Now(),
}
s.mu.Lock()
defer s.mu.Unlock()
// For tracking clients
s.clients = make(map[uint64]*client)
// For tracking connections that are not yet registered
// in s.routes, but for which readLoop has started.
s.grTmpClients = make(map[uint64]*client)
// For tracking routes and their remote ids
s.routes = make(map[uint64]*client)
s.remotes = make(map[string]*client)
// Used to kick out all of the route
// connect Go routines.
s.rcQuit = make(chan bool)
s.generateServerInfoJSON()
s.handleSignals()
return s
}
// SetClientAuthMethod sets the authentication method for clients.
func (s *Server) SetClientAuthMethod(authMethod Auth) {
s.mu.Lock()
defer s.mu.Unlock()
s.info.AuthRequired = true
s.cAuth = authMethod
s.generateServerInfoJSON()
}
// SetRouteAuthMethod sets the authentication method for routes.
func (s *Server) SetRouteAuthMethod(authMethod Auth) {
s.mu.Lock()
defer s.mu.Unlock()
s.rAuth = authMethod
}
func (s *Server) generateServerInfoJSON() {
// Generate the info json
b, err := json.Marshal(s.info)
if err != nil {
Fatalf("Error marshalling INFO JSON: %+v\n", err)
return
}
s.infoJSON = []byte(fmt.Sprintf("INFO %s %s", b, CR_LF))
}
// PrintAndDie is exported for access in other packages.
func PrintAndDie(msg string) {
fmt.Fprintf(os.Stderr, "%s\n", msg)
os.Exit(1)
}
// PrintServerAndExit will print our version and exit.
func PrintServerAndExit() {
fmt.Printf("nats-server version %s\n", VERSION)
os.Exit(0)
}
// Protected check on running state
func (s *Server) isRunning() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.running
}
func (s *Server) logPid() {
pidStr := strconv.Itoa(os.Getpid())
err := ioutil.WriteFile(s.opts.PidFile, []byte(pidStr), 0660)
if err != nil {
PrintAndDie(fmt.Sprintf("Could not write pidfile: %v\n", err))
}
}
// Start up the server, this will block.
// Start via a Go routine if needed.
func (s *Server) Start() {
Noticef("Starting nats-server version %s", VERSION)
Debugf("Go build version %s", s.info.GoVersion)
// Avoid RACE between Start() and Shutdown()
s.mu.Lock()
s.running = true
s.mu.Unlock()
s.grMu.Lock()
s.grRunning = true
s.grMu.Unlock()
// Log the pid to a file
if s.opts.PidFile != _EMPTY_ {
s.logPid()
}
// Start up the http server if needed.
if s.opts.HTTPPort != 0 {
s.StartHTTPMonitoring()
}
// Start up the https server if needed.
if s.opts.HTTPSPort != 0 {
if s.opts.TLSConfig == nil {
Fatalf("TLS cert and key required for HTTPS")
return
}
s.StartHTTPSMonitoring()
}
// The Routing routine needs to wait for the client listen
// port to be opened and potential ephemeral port selected.
clientListenReady := make(chan struct{})
// Start up routing as well if needed.
if s.opts.ClusterPort != 0 {
s.startGoRoutine(func() {
s.StartRouting(clientListenReady)
})
}
// Pprof http endpoint for the profiler.
if s.opts.ProfPort != 0 {
s.StartProfiler()
}
// Wait for clients.
s.AcceptLoop(clientListenReady)
}
// Shutdown will shutdown the server instance by kicking out the AcceptLoop
// and closing all associated clients.
func (s *Server) Shutdown() {
s.mu.Lock()
// Prevent issues with multiple calls.
if !s.running {
s.mu.Unlock()
return
}
s.running = false
s.grMu.Lock()
s.grRunning = false
s.grMu.Unlock()
conns := make(map[uint64]*client)
// Copy off the clients
for i, c := range s.clients {
conns[i] = c
}
// Copy off the connections that are not yet registered
// in s.routes, but for which the readLoop has started
s.grMu.Lock()
for i, c := range s.grTmpClients {
conns[i] = c
}
s.grMu.Unlock()
// Copy off the routes
for i, r := range s.routes {
conns[i] = r
}
// Number of done channel responses we expect.
doneExpected := 0
// Kick client AcceptLoop()
if s.listener != nil {
doneExpected++
s.listener.Close()
s.listener = nil
}
// Kick route AcceptLoop()
if s.routeListener != nil {
doneExpected++
s.routeListener.Close()
s.routeListener = nil
}
// Kick HTTP monitoring if its running
if s.http != nil {
doneExpected++
s.http.Close()
s.http = nil
}
// Release the solicited routes connect go routines.
close(s.rcQuit)
s.mu.Unlock()
// Close client and route connections
for _, c := range conns {
c.closeConnection()
}
// Block until the accept loops exit
for doneExpected > 0 {
<-s.done
doneExpected--
}
// Wait for go routines to be done.
s.grWG.Wait()
}
// AcceptLoop is exported for easier testing.
func (s *Server) AcceptLoop(clr chan struct{}) {
// If we were to exit before the listener is setup properly,
// make sure we close the channel.
defer func() {
if clr != nil {
close(clr)
}
}()
hp := net.JoinHostPort(s.opts.Host, strconv.Itoa(s.opts.Port))
Noticef("Listening for client connections on %s", hp)
l, e := net.Listen("tcp", hp)
if e != nil {
Fatalf("Error listening on port: %s, %q", hp, e)
return
}
// Alert of TLS enabled.
if s.opts.TLSConfig != nil {
Noticef("TLS required for client connections")
}
Debugf("Server id is %s", s.info.ID)
Noticef("Server is ready")
// Setup state that can enable shutdown
s.mu.Lock()
s.listener = l
// If server was started with RANDOM_PORT (-1), opts.Port would be equal
// to 0 at the beginning this function. So we need to get the actual port
if s.opts.Port == 0 {
// Write resolved port back to options.
_, port, err := net.SplitHostPort(l.Addr().String())
if err != nil {
Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
s.mu.Unlock()
return
}
portNum, err := strconv.Atoi(port)
if err != nil {
Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
s.mu.Unlock()
return
}
s.opts.Port = portNum
}
s.mu.Unlock()
// Let the caller know that we are ready
close(clr)
clr = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
Debugf("Temporary Client Accept Error(%v), sleeping %dms",
ne, tmpDelay/time.Millisecond)
time.Sleep(tmpDelay)
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else if s.isRunning() {
Noticef("Accept error: %v", err)
}
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createClient(conn)
s.grWG.Done()
})
}
Noticef("Server Exiting..")
s.done <- true
}
// StartProfiler is called to enable dynamic profiling.
func (s *Server) StartProfiler() {
Noticef("Starting profiling on http port %d", s.opts.ProfPort)
hp := net.JoinHostPort(s.opts.Host, strconv.Itoa(s.opts.ProfPort))
go func() {
err := http.ListenAndServe(hp, nil)
if err != nil {
Fatalf("error starting monitor server: %s", err)
}
}()
}
// StartHTTPMonitoring will enable the HTTP monitoring port.
func (s *Server) StartHTTPMonitoring() {
s.startMonitoring(false)
}
// StartHTTPSMonitoring will enable the HTTPS monitoring port.
func (s *Server) StartHTTPSMonitoring() {
s.startMonitoring(true)
}
// HTTP endpoints
const (
RootPath = "/"
VarzPath = "/varz"
ConnzPath = "/connz"
RoutezPath = "/routez"
SubszPath = "/subsz"
StackszPath = "/stacksz"
)
// Start the monitoring server
func (s *Server) startMonitoring(secure bool) {
// Used to track HTTP requests
s.httpReqStats = map[string]uint64{
RootPath: 0,
VarzPath: 0,
ConnzPath: 0,
RoutezPath: 0,
SubszPath: 0,
}
var hp string
var err error
if secure {
hp = net.JoinHostPort(s.opts.HTTPHost, strconv.Itoa(s.opts.HTTPSPort))
Noticef("Starting https monitor on %s", hp)
config := util.CloneTLSConfig(s.opts.TLSConfig)
config.ClientAuth = tls.NoClientCert
s.http, err = tls.Listen("tcp", hp, config)
} else {
hp = net.JoinHostPort(s.opts.HTTPHost, strconv.Itoa(s.opts.HTTPPort))
Noticef("Starting http monitor on %s", hp)
s.http, err = net.Listen("tcp", hp)
}
if err != nil {
Fatalf("Can't listen to the monitor port: %v", err)
return
}
mux := http.NewServeMux()
// Root
mux.HandleFunc(RootPath, s.HandleRoot)
// Varz
mux.HandleFunc(VarzPath, s.HandleVarz)
// Connz
mux.HandleFunc(ConnzPath, s.HandleConnz)
// Routez
mux.HandleFunc(RoutezPath, s.HandleRoutez)
// Subz
mux.HandleFunc(SubszPath, s.HandleSubsz)
// Subz alias for backwards compatibility
mux.HandleFunc("/subscriptionsz", s.HandleSubsz)
// Stacksz
mux.HandleFunc(StackszPath, s.HandleStacksz)
srv := &http.Server{
Addr: hp,
Handler: mux,
ReadTimeout: 2 * time.Second,
WriteTimeout: 2 * time.Second,
MaxHeaderBytes: 1 << 20,
}
go func() {
srv.Serve(s.http)
srv.Handler = nil
s.done <- true
}()
}
func (s *Server) createClient(conn net.Conn) *client {
c := &client{srv: s, nc: conn, opts: defaultOpts, mpay: s.info.MaxPayload, start: time.Now()}
// Grab JSON info string
s.mu.Lock()
info := s.infoJSON
authRequired := s.info.AuthRequired
tlsRequired := s.info.TLSRequired
s.totalClients++
s.mu.Unlock()
// Grab lock
c.mu.Lock()
// Initialize
c.initClient()
c.Debugf("Client connection created")
// Check for Auth
if authRequired {
c.setAuthTimer(secondsToDuration(s.opts.AuthTimeout))
}
// Send our information.
c.sendInfo(info)
// Unlock to register
c.mu.Unlock()
// Register with the server.
s.mu.Lock()
// If server is not running, Shutdown() may have already gathered the
// list of connections to close. It won't contain this one, so we need
// to bail out now otherwise the readLoop started down there would not
// be interrupted.
if !s.running {
s.mu.Unlock()
return c
}
s.clients[c.cid] = c
s.mu.Unlock()
// Re-Grab lock
c.mu.Lock()
// Check for TLS
if tlsRequired {
c.Debugf("Starting TLS client connection handshake")
c.nc = tls.Server(c.nc, s.opts.TLSConfig)
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(s.opts.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
// Force handshake
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Debugf("TLS handshake error: %v", err)
c.sendErr("Secure Connection - TLS Required")
c.closeConnection()
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
}
// The connection may have been closed
if c.nc == nil {
c.mu.Unlock()
return c
}
if tlsRequired {
// Rewrap bw
c.bw = bufio.NewWriterSize(c.nc, startBufSize)
}
// Do final client initialization
// Set the Ping timer
c.setPingTimer()
// Spin up the read loop.
s.startGoRoutine(func() { c.readLoop() })
if tlsRequired {
c.Debugf("TLS handshake complete")
cs := c.nc.(*tls.Conn).ConnectionState()
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
}
c.mu.Unlock()
return c
}
// updateServerINFO updates the server's Info object with the given
// array of URLs and re-generate the infoJSON byte array, only if the
// given URLs were not already recorded and if the feature is not
// disabled.
// Returns a boolean indicating if server's Info was updated.
func (s *Server) updateServerINFO(urls []string) bool {
s.mu.Lock()
defer s.mu.Unlock()
// Feature disabled, do not update.
if s.opts.ClusterNoAdvertise {
return false
}
// Will be set to true if we alter the server's Info object.
wasUpdated := false
for _, url := range urls {
if _, present := s.info.clientConnectURLs[url]; !present {
s.info.clientConnectURLs[url] = struct{}{}
s.info.ClientConnectURLs = append(s.info.ClientConnectURLs, url)
wasUpdated = true
}
}
if wasUpdated {
s.generateServerInfoJSON()
}
return wasUpdated
}
// Handle closing down a connection when the handshake has timedout.
func tlsTimeout(c *client, conn *tls.Conn) {
c.mu.Lock()
nc := c.nc
c.mu.Unlock()
// Check if already closed
if nc == nil {
return
}
cs := conn.ConnectionState()
if !cs.HandshakeComplete {
c.Debugf("TLS handshake timeout")
c.sendErr("Secure Connection - TLS Required")
c.closeConnection()
}
}
// Seems silly we have to write these
func tlsVersion(ver uint16) string {
switch ver {
case tls.VersionTLS10:
return "1.0"
case tls.VersionTLS11:
return "1.1"
case tls.VersionTLS12:
return "1.2"
}
return fmt.Sprintf("Unknown [%x]", ver)
}
// We use hex here so we don't need multiple versions
func tlsCipher(cs uint16) string {
switch cs {
case 0x0005:
return "TLS_RSA_WITH_RC4_128_SHA"
case 0x000a:
return "TLS_RSA_WITH_3DES_EDE_CBC_SHA"
case 0x002f:
return "TLS_RSA_WITH_AES_128_CBC_SHA"
case 0x0035:
return "TLS_RSA_WITH_AES_256_CBC_SHA"
case 0xc007:
return "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA"
case 0xc009:
return "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
case 0xc00a:
return "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"
case 0xc011:
return "TLS_ECDHE_RSA_WITH_RC4_128_SHA"
case 0xc012:
return "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA"
case 0xc013:
return "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"
case 0xc014:
return "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
case 0xc02f:
return "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
case 0xc02b:
return "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
case 0xc030:
return "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
case 0xc02c:
return "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
}
return fmt.Sprintf("Unknown [%x]", cs)
}
func (s *Server) checkClientAuth(c *client) bool {
if s.cAuth == nil {
return true
}
return s.cAuth.Check(c)
}
func (s *Server) checkRouterAuth(c *client) bool {
if s.rAuth == nil {
return true
}
return s.rAuth.Check(c)
}
// Check auth and return boolean indicating if client is ok
func (s *Server) checkAuth(c *client) bool {
switch c.typ {
case CLIENT:
return s.checkClientAuth(c)
case ROUTER:
return s.checkRouterAuth(c)
default:
return false
}
}
// Remove a client or route from our internal accounting.
func (s *Server) removeClient(c *client) {
var rID string
c.mu.Lock()
cid := c.cid
typ := c.typ
r := c.route
if r != nil {
rID = r.remoteID
}
updateProtoInfoCount := false
if typ == CLIENT && c.opts.Protocol >= ClientProtoInfo {
updateProtoInfoCount = true
}
c.mu.Unlock()
s.mu.Lock()
switch typ {
case CLIENT:
delete(s.clients, cid)
if updateProtoInfoCount {
s.cproto--
}
case ROUTER:
delete(s.routes, cid)
if r != nil {
rc, ok := s.remotes[rID]
// Only delete it if it is us..
if ok && c == rc {
delete(s.remotes, rID)
}
}
}
s.mu.Unlock()
}
/////////////////////////////////////////////////////////////////
// These are some helpers for accounting in functional tests.
/////////////////////////////////////////////////////////////////
// NumRoutes will report the number of registered routes.
func (s *Server) NumRoutes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.routes)
}
// NumRemotes will report number of registered remotes.
func (s *Server) NumRemotes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.remotes)
}
// NumClients will report the number of registered clients.
func (s *Server) NumClients() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.clients)
}
// NumSubscriptions will report how many subscriptions are active.
func (s *Server) NumSubscriptions() uint32 {
s.mu.Lock()
subs := s.sl.Count()
s.mu.Unlock()
return subs
}
// Addr will return the net.Addr object for the current listener.
func (s *Server) Addr() net.Addr {
s.mu.Lock()
defer s.mu.Unlock()
if s.listener == nil {
return nil
}
return s.listener.Addr()
}
// GetListenEndpoint will return a string of the form host:port suitable for
// a connect. Will return empty string if the server is not ready to accept
// client connections.
func (s *Server) GetListenEndpoint() string {
s.mu.Lock()
defer s.mu.Unlock()
// Wait for the listener to be set, see note about RANDOM_PORT below
if s.listener == nil {
return ""
}
host := s.opts.Host
// On windows, a connect with host "0.0.0.0" (or "::") will fail.
// We replace it with "localhost" when that's the case.
if host == "0.0.0.0" || host == "::" || host == "[::]" {
host = "localhost"
}
// Return the opts's Host and Port. Note that the Port may be set
// when the listener is started, due to the use of RANDOM_PORT
return net.JoinHostPort(host, strconv.Itoa(s.opts.Port))
}
// GetRouteListenEndpoint will return a string of the form host:port suitable
// for a connect. Will return empty string if the server is not configured for
// routing or not ready to accept route connections.
func (s *Server) GetRouteListenEndpoint() string {
s.mu.Lock()
defer s.mu.Unlock()
if s.routeListener == nil {
return ""
}
host := s.opts.ClusterHost
// On windows, a connect with host "0.0.0.0" (or "::") will fail.
// We replace it with "localhost" when that's the case.
if host == "0.0.0.0" || host == "::" || host == "[::]" {
host = "localhost"
}
// Return the cluster's Host and Port.
return net.JoinHostPort(host, strconv.Itoa(s.opts.ClusterPort))
}
// ID returns the server's ID
func (s *Server) ID() string {
s.mu.Lock()
defer s.mu.Unlock()
return s.info.ID
}
func (s *Server) startGoRoutine(f func()) {
s.grMu.Lock()
if s.grRunning {
s.grWG.Add(1)
go f()
}
s.grMu.Unlock()
}
// getClientConnectURLs returns suitable URLs for clients to connect to the listen
// port based on the server options' Host and Port. If the Host corresponds to
// "any" interfaces, this call returns the list of resolved IP addresses.
func (s *Server) getClientConnectURLs() []string {
s.mu.Lock()
defer s.mu.Unlock()
sPort := strconv.Itoa(s.opts.Port)
urls := make([]string, 0, 1)
ipAddr, err := net.ResolveIPAddr("ip", s.opts.Host)
// If the host is "any" (0.0.0.0 or ::), get specific IPs from available
// interfaces.
if err == nil && ipAddr.IP.IsUnspecified() {
var ip net.IP
ifaces, _ := net.Interfaces()
for _, i := range ifaces {
addrs, _ := i.Addrs()
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
// Skip non global unicast addresses
if !ip.IsGlobalUnicast() || ip.IsUnspecified() {
ip = nil
continue
}
urls = append(urls, net.JoinHostPort(ip.String(), sPort))
}
}
}
if err != nil || len(urls) == 0 {
// We are here if s.opts.Host is not "0.0.0.0" nor "::", or if for some
// reason we could not add any URL in the loop above.
// We had a case where a Windows VM was hosed and would have err == nil
// and not add any address in the array in the loop above, and we
// ended-up returning 0.0.0.0, which is problematic for Windows clients.
// Check for 0.0.0.0 or :: specifically, and ignore if that's the case.
if s.opts.Host == "0.0.0.0" || s.opts.Host == "::" {
Errorf("Address %q can not be resolved properly", s.opts.Host)
} else {
urls = append(urls, net.JoinHostPort(s.opts.Host, sPort))
}
}
return urls
}
| 1 | 6,727 | If we have added to s.clients, we could just do the following since if its a route will be ok I think. s.mu.Lock() defer s.mu.Unlock() return len(s.clients) <+ s.opts.MaxConn | nats-io-nats-server | go |
@@ -53,7 +53,10 @@ void BalanceTask::invoke() {
LOG(INFO) << taskIdStr_ << "Ask the src to give up the leadership.";
SAVE_STATE();
if (srcLived_) {
- client_->transLeader(spaceId_, partId_, src_).thenValue([this](auto&& resp) {
+      // For a single-replica balance task, the target leader is src_ itself
+ HostAddr targetLeader = singleReplica_ ? src_ : kRandomPeer;
+ client_->transLeader(spaceId_, partId_, src_, targetLeader)
+ .thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Transfer leader failed, status " << resp;
if (resp == nebula::Status::PartNotFound()) { | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "meta/processors/admin/BalanceTask.h"
#include <folly/synchronization/Baton.h>
#include "meta/processors/Common.h"
namespace nebula {
namespace meta {
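// Persist the current task state before moving on to the next step; if
// persisting fails, mark the task FAILED and abort this invocation.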
#define SAVE_STATE() \
if (!saveInStore()) { \
ret_ = Result::FAILED; \
onError_(); \
return; \
}
const std::string kBalanceTaskTable = "__b_task__"; // NOLINT
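// invoke() drives the balance task as a state machine: each asynchronous
// step re-enters invoke() from its callback, advancing status_ through
// START -> CHANGE_LEADER -> ADD_PART_ON_DST -> ADD_LEARNER -> CATCH_UP_DATA
// -> MEMBER_CHANGE_ADD -> MEMBER_CHANGE_REMOVE -> UPDATE_PART_META
// -> REMOVE_PART_ON_SRC -> CHECK -> END.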
void BalanceTask::invoke() {
CHECK_NOTNULL(client_);
if (ret_ == Result::INVALID) {
endTimeMs_ = time::WallClock::fastNowInMilliSec();
saveInStore();
LOG(ERROR) << taskIdStr_ << "Task invalid, status " << static_cast<int32_t>(status_);
onFinished_();
return;
}
if (ret_ == Result::FAILED) {
endTimeMs_ = time::WallClock::fastNowInMilliSec();
saveInStore();
LOG(ERROR) << taskIdStr_ << "Task failed, status " << static_cast<int32_t>(status_);
onError_();
return;
}
if (ret_ == Result::SUCCEEDED) {
CHECK(status_ == Status::END);
onFinished_();
return;
}
switch (status_) {
case Status::START: {
LOG(INFO) << taskIdStr_ << "Start to move part!";
status_ = Status::CHANGE_LEADER;
ret_ = Result::IN_PROGRESS;
startTimeMs_ = time::WallClock::fastNowInMilliSec();
}
// fallthrough
case Status::CHANGE_LEADER: {
LOG(INFO) << taskIdStr_ << "Ask the src to give up the leadership.";
SAVE_STATE();
if (srcLived_) {
client_->transLeader(spaceId_, partId_, src_).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Transfer leader failed, status " << resp;
if (resp == nebula::Status::PartNotFound()) {
ret_ = Result::INVALID;
} else {
ret_ = Result::FAILED;
}
} else {
status_ = Status::ADD_PART_ON_DST;
}
invoke();
});
break;
} else {
LOG(INFO) << taskIdStr_ << "Src host has been lost, so no need to transfer leader";
status_ = Status::ADD_PART_ON_DST;
}
}
// fallthrough
case Status::ADD_PART_ON_DST: {
LOG(INFO) << taskIdStr_ << "Open the part as learner on dst.";
SAVE_STATE();
client_->addPart(spaceId_, partId_, dst_, true).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Open part failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::ADD_LEARNER;
}
invoke();
});
break;
}
case Status::ADD_LEARNER: {
LOG(INFO) << taskIdStr_ << "Add learner dst.";
SAVE_STATE();
client_->addLearner(spaceId_, partId_, dst_).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Add learner failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::CATCH_UP_DATA;
}
invoke();
});
break;
}
case Status::CATCH_UP_DATA: {
LOG(INFO) << taskIdStr_ << "Waiting for the data catch up.";
SAVE_STATE();
client_->waitingForCatchUpData(spaceId_, partId_, dst_).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Catchup data failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::MEMBER_CHANGE_ADD;
}
invoke();
});
break;
}
case Status::MEMBER_CHANGE_ADD: {
LOG(INFO) << taskIdStr_ << "Send member change request to the leader"
<< ", it will add the new member on dst host";
SAVE_STATE();
client_->memberChange(spaceId_, partId_, dst_, true).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Add peer failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::MEMBER_CHANGE_REMOVE;
}
invoke();
});
break;
}
case Status::MEMBER_CHANGE_REMOVE: {
LOG(INFO) << taskIdStr_ << "Send member change request to the leader"
<< ", it will remove the old member on src host";
SAVE_STATE();
client_->memberChange(spaceId_, partId_, src_, false).thenValue(
[this] (auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Remove peer failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::UPDATE_PART_META;
}
invoke();
});
break;
}
case Status::UPDATE_PART_META: {
LOG(INFO) << taskIdStr_ << "Update meta for part.";
SAVE_STATE();
client_->updateMeta(spaceId_, partId_, src_, dst_).thenValue(
[this] (auto&& resp) {
// The callback will be called inside raft set value. So don't call invoke directly
// here.
LOG(INFO) << "Update meta succeeded!";
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Update meta failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::REMOVE_PART_ON_SRC;
}
invoke();
});
break;
}
case Status::REMOVE_PART_ON_SRC: {
LOG(INFO) << taskIdStr_ << "Close part on src host, srcLived " << srcLived_;
SAVE_STATE();
if (srcLived_) {
client_->removePart(spaceId_, partId_, src_).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Remove part failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::CHECK;
}
invoke();
});
break;
} else {
LOG(INFO) << taskIdStr_ << "Don't remove part on src " << src_;
status_ = Status::CHECK;
}
}
// fallthrough
case Status::CHECK: {
LOG(INFO) << taskIdStr_ << "Check the peers...";
SAVE_STATE();
client_->checkPeers(spaceId_, partId_).thenValue([this] (auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Check the peers failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::END;
}
invoke();
});
break;
}
case Status::END: {
LOG(INFO) << taskIdStr_ << "Part has been moved successfully!";
            endTimeMs_ = time::WallClock::fastNowInMilliSec();  // keep milliseconds, matching startTimeMs_
ret_ = Result::SUCCEEDED;
SAVE_STATE();
onFinished_();
break;
}
}
return;
}
void BalanceTask::rollback() {
if (status_ < Status::UPDATE_PART_META) {
// TODO(heng): restart the part on its peers.
} else {
// TODO(heng): Go on the task.
}
}
bool BalanceTask::saveInStore() {
if (kv_) {
std::vector<kvstore::KV> data;
data.emplace_back(taskKey(), taskVal());
folly::Baton<true, std::atomic> baton;
bool ret = false;
kv_->asyncMultiPut(kDefaultSpaceId,
kDefaultPartId,
std::move(data),
[this, &ret, &baton] (kvstore::ResultCode code) {
if (kvstore::ResultCode::SUCCEEDED == code) {
ret = true;
} else {
LOG(INFO) << taskIdStr_ << "Can't persist task!";
}
baton.post();
});
baton.wait();
return ret;
}
return true;
}
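// taskKey encodes {table prefix, balanceId, spaceId, partId, src, dst} back to
// back as raw bytes; parseKey below must read the fields in exactly this order.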
std::string BalanceTask::taskKey() {
std::string str;
str.reserve(64);
str.append(reinterpret_cast<const char*>(kBalanceTaskTable.data()), kBalanceTaskTable.size());
str.append(reinterpret_cast<const char*>(&balanceId_), sizeof(balanceId_));
str.append(reinterpret_cast<const char*>(&spaceId_), sizeof(spaceId_));
str.append(reinterpret_cast<const char*>(&partId_), sizeof(partId_));
str.append(reinterpret_cast<const char*>(&src_), sizeof(src_));
str.append(reinterpret_cast<const char*>(&dst_), sizeof(dst_));
return str;
}
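// taskVal encodes {status, ret, srcLived, startTimeMs, endTimeMs} as raw bytes;
// parseVal below mirrors this layout.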
std::string BalanceTask::taskVal() {
std::string str;
str.reserve(32);
str.append(reinterpret_cast<const char*>(&status_), sizeof(status_));
str.append(reinterpret_cast<const char*>(&ret_), sizeof(ret_));
str.append(reinterpret_cast<const char*>(&srcLived_), sizeof(srcLived_));
str.append(reinterpret_cast<const char*>(&startTimeMs_), sizeof(startTimeMs_));
str.append(reinterpret_cast<const char*>(&endTimeMs_), sizeof(endTimeMs_));
return str;
}
std::string BalanceTask::prefix(BalanceID balanceId) {
std::string str;
str.reserve(32);
str.append(reinterpret_cast<const char*>(kBalanceTaskTable.data()), kBalanceTaskTable.size());
str.append(reinterpret_cast<const char*>(&balanceId), sizeof(balanceId));
return str;
}
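// parseKey/parseVal are the inverses of taskKey/taskVal; the offsets here must
// stay in sync with the writers above.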
std::tuple<BalanceID, GraphSpaceID, PartitionID, HostAddr, HostAddr>
BalanceTask::parseKey(const folly::StringPiece& rawKey) {
int32_t offset = kBalanceTaskTable.size();
auto balanceId = *reinterpret_cast<const BalanceID*>(rawKey.begin() + offset);
offset += sizeof(balanceId);
auto spaceId = *reinterpret_cast<const GraphSpaceID*>(rawKey.begin() + offset);
offset += sizeof(GraphSpaceID);
auto partId = *reinterpret_cast<const PartitionID*>(rawKey.begin() + offset);
offset += sizeof(PartitionID);
auto src = *reinterpret_cast<const HostAddr*>(rawKey.begin() + offset);
offset += sizeof(HostAddr);
auto dst = *reinterpret_cast<const HostAddr*>(rawKey.begin() + offset);
return std::make_tuple(balanceId, spaceId, partId, src, dst);
}
std::tuple<BalanceTask::Status, BalanceTask::Result, bool, int64_t, int64_t>
BalanceTask::parseVal(const folly::StringPiece& rawVal) {
int32_t offset = 0;
auto status = *reinterpret_cast<const BalanceTask::Status*>(rawVal.begin() + offset);
offset += sizeof(BalanceTask::Status);
auto ret = *reinterpret_cast<const BalanceTask::Result*>(rawVal.begin() + offset);
offset += sizeof(BalanceTask::Result);
auto srcLived = *reinterpret_cast<const bool*>(rawVal.begin() + offset);
offset += sizeof(bool);
auto start = *reinterpret_cast<const int64_t*>(rawVal.begin() + offset);
offset += sizeof(int64_t);
auto end = *reinterpret_cast<const int64_t*>(rawVal.begin() + offset);
return std::make_tuple(status, ret, srcLived, start, end);
}
} // namespace meta
} // namespace nebula
| 1 | 28,424 | If the targetLeader is src_ itself, do we really need to call the transLeader function? | vesoft-inc-nebula | cpp
@@ -85,6 +85,7 @@ public class CSharpSurfaceNamer extends SurfaceNamer {
@Override
public String getAndSavePagedResponseTypeName(
+ Method method,
FeatureConfig featureConfig,
ModelTypeTable typeTable,
TypeRef inputType, | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.csharp;
import com.google.api.codegen.ServiceMessages;
import com.google.api.codegen.config.CollectionConfig;
import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.transformer.FeatureConfig;
import com.google.api.codegen.transformer.ModelTypeFormatterImpl;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.transformer.SurfaceTransformerContext;
import com.google.api.codegen.transformer.Synchronicity;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.csharp.CSharpNameFormatter;
import com.google.api.codegen.util.csharp.CSharpTypeTable;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.TypeRef;
import com.google.common.collect.ImmutableList;
import java.util.List;
public class CSharpSurfaceNamer extends SurfaceNamer {
public CSharpSurfaceNamer(String implicitPackageName) {
super(
new CSharpNameFormatter(),
new ModelTypeFormatterImpl(new CSharpModelTypeNameConverter(implicitPackageName)),
new CSharpTypeTable(implicitPackageName));
}
@Override
public String getFullyQualifiedApiWrapperClassName(Interface service, String packageName) {
return packageName + "." + getApiWrapperClassName(service);
}
@Override
public String getStaticLangReturnTypeName(Method method, MethodConfig methodConfig) {
if (ServiceMessages.s_isEmptyType(method.getOutputType())) {
return "void";
}
return getModelTypeFormatter().getFullNameFor(method.getOutputType());
}
@Override
public String getStaticLangAsyncReturnTypeName(Method method, MethodConfig methodConfig) {
if (ServiceMessages.s_isEmptyType(method.getOutputType())) {
return "Task";
}
return "Task<" + getModelTypeFormatter().getFullNameFor(method.getOutputType()) + ">";
}
@Override
public String getApiWrapperClassName(Interface interfaze) {
return className(Name.upperCamel(interfaze.getSimpleName(), "Client"));
}
@Override
public String getCallableName(Method method) {
return privateFieldName(Name.upperCamel("Call", method.getSimpleName()));
}
@Override
public String getPathTemplateName(Interface service, CollectionConfig collectionConfig) {
return inittedConstantName(Name.from(collectionConfig.getEntityName(), "template"));
}
@Override
public String getFieldGetFunctionName(TypeRef type, Name identifier) {
return privateMethodName(identifier);
}
@Override
public String getAndSavePagedResponseTypeName(
FeatureConfig featureConfig,
ModelTypeTable typeTable,
TypeRef inputType,
TypeRef outputType,
Field resourceField) {
String inputTypeName = typeTable.getAndSaveNicknameForElementType(inputType);
String outputTypeName = typeTable.getAndSaveNicknameForElementType(outputType);
String resourceTypeName =
getAndSaveElementFieldTypeName(featureConfig, typeTable, resourceField);
return typeTable.getAndSaveNicknameForContainer(
"Google.Api.Gax.PagedEnumerable", inputTypeName, outputTypeName, resourceTypeName);
}
@Override
public String getGrpcContainerTypeName(Interface service) {
return className(Name.upperCamel(service.getSimpleName()));
}
@Override
public String getGrpcServiceClassName(Interface service) {
return className(Name.upperCamel(service.getSimpleName()))
+ "."
+ className(Name.upperCamel(service.getSimpleName(), "Client"));
}
@Override
public String getApiWrapperClassImplName(Interface interfaze) {
return className(Name.upperCamel(interfaze.getSimpleName(), "ClientImpl"));
}
@Override
public String getAsyncApiMethodName(Method method) {
return getApiMethodName(method) + "Async";
}
@Override
public String getPageStreamingDescriptorConstName(Method method) {
return inittedConstantName(Name.upperCamel(method.getSimpleName()));
}
@Override
public String getParamName(String var) {
return localVarName(Name.from(var).join("id"));
}
@Override
public List<String> getReturnDocLines(
SurfaceTransformerContext context, MethodConfig methodConfig, Synchronicity synchronicity) {
if (methodConfig.isPageStreaming()) {
TypeRef resourceType = methodConfig.getPageStreaming().getResourcesField().getType();
String resourceTypeName =
context.getTypeTable().getAndSaveNicknameForElementType(resourceType);
switch (synchronicity) {
case Sync:
return ImmutableList.of(
"A pageable sequence of <see cref=\"" + resourceTypeName + "\"/> resources.");
case Async:
return ImmutableList.of(
"A pageable asynchronous sequence of <see cref=\""
+ resourceTypeName
+ "\"/> resources.");
}
} else {
switch (synchronicity) {
case Sync:
return ImmutableList.of("The RPC response.");
case Async:
return ImmutableList.of("A Task containing the RPC response.");
}
}
throw new IllegalStateException("Invalid Synchronicity: " + synchronicity);
}
}
| 1 | 18,487 | Can we remove this featureConfig, since you removed it in L98 (assuming it is not used elsewhere)? | googleapis-gapic-generator | java
@@ -11,6 +11,7 @@ using Newtonsoft.Json;
using Xunit;
using System.Collections.Generic;
+using System.Linq;
namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
{ | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.IO;
using System.Reflection;
using Microsoft.CodeAnalysis.Sarif.Sdk;
using Microsoft.CodeAnalysis.Sarif.Readers;
using Newtonsoft.Json;
using Xunit;
using System.Collections.Generic;
namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
{
public class AnalyzeCommandBaseTests
{
private void ExceptionTestHelper(
ExceptionCondition exceptionCondition,
RuntimeConditions runtimeConditions,
ExitReason expectedExitReason = ExitReason.None,
TestAnalyzeOptions analyzeOptions = null)
{
ExceptionRaisingRule.s_exceptionCondition = exceptionCondition;
analyzeOptions = analyzeOptions ?? new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[0]
};
var command = new TestAnalyzeCommand();
Assembly[] plugInAssemblies = null;
if (analyzeOptions.DefaultPlugInFilePaths != null)
{
var assemblies = new List<Assembly>();
foreach (string plugInFilePath in analyzeOptions.DefaultPlugInFilePaths)
{
assemblies.Add(Assembly.LoadFrom(plugInFilePath));
}
plugInAssemblies = new Assembly[assemblies.Count];
assemblies.CopyTo(plugInAssemblies, 0);
}
else
{
plugInAssemblies = new Assembly[] { typeof(ExceptionRaisingRule).Assembly };
}
command.DefaultPlugInAssemblies = plugInAssemblies;
int result = command.Run(analyzeOptions);
int expectedResult =
(runtimeConditions & RuntimeConditions.Fatal) == RuntimeConditions.NoErrors ?
TestAnalyzeCommand.SUCCESS : TestAnalyzeCommand.FAILURE;
Assert.Equal(runtimeConditions, command.RuntimeErrors);
Assert.Equal(expectedResult, result);
if (expectedExitReason != ExitReason.None)
{
Assert.NotNull(command.ExecutionException);
if (expectedExitReason != ExitReason.UnhandledExceptionInEngine)
{
var eax = command.ExecutionException as ExitApplicationException<ExitReason>;
Assert.NotNull(eax);
}
}
else
{
Assert.Null(command.ExecutionException);
}
ExceptionRaisingRule.s_exceptionCondition = ExceptionCondition.None;
}
[Fact]
public void InvalidCommandLineOption()
{
var options = new TestAnalyzeOptions
{
RegardOptionsAsInvalid = true
};
ExceptionTestHelper(
ExceptionCondition.ValidatingOptions,
RuntimeConditions.InvalidCommandLineOption,
ExitReason.InvalidCommandLineOption,
options);
}
[Fact]
public void NotApplicableToTarget()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
RegardAnalysisTargetAsNotApplicable = true
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.RuleNotApplicableToTarget,
analyzeOptions: options);
}
[Fact]
public void InvalidTarget()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
RegardAnalysisTargetAsValid = false
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.TargetNotValidToAnalyze,
analyzeOptions: options);
}
[Fact]
public void MissingRequiredConfiguration()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
RegardRequiredConfigurationAsMissing = true
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.RuleMissingRequiredConfiguration,
analyzeOptions: options);
}
[Fact]
public void ExceptionLoadingTarget()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
RegardAnalysisTargetAsCorrupted = true
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.ExceptionLoadingTargetFile,
analyzeOptions: options);
}
[Fact]
public void ExceptionRaisedInstantiatingSkimmers()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.InvokingConstructor,
RuntimeConditions.ExceptionInstantiatingSkimmers,
ExitReason.UnhandledExceptionInstantiatingSkimmers,
            analyzeOptions: options);
}
[Fact]
public void NoRulesLoaded()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
DefaultPlugInFilePaths = new string[] { typeof(string).Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.NoRulesLoaded,
ExitReason.NoRulesLoaded,
            analyzeOptions: options
);
}
[Fact]
public void NoValidAnalysisTargets()
{
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.NoValidAnalysisTargets,
ExitReason.NoValidAnalysisTargets
);
}
[Fact]
public void ExceptionRaisedInvokingInitialize()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.InvokingInitialize,
RuntimeConditions.ExceptionInSkimmerInitialize,
analyzeOptions: options
);
}
[Fact]
public void LoadPdbException()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.LoadingPdb,
RuntimeConditions.ExceptionLoadingPdb,
analyzeOptions: options
);
}
[Fact]
public void FileUri()
{
Uri uri = new Uri(this.GetType().Assembly.Location);
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { uri.ToString() },
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.NoErrors,
analyzeOptions: options
);
}
[Fact]
public void ParseTargetException()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.ParsingTarget,
RuntimeConditions.TargetParseError,
analyzeOptions: options
);
}
[Fact]
public void ExceptionRaisedInvokingCanAnalyze()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.InvokingCanAnalyze,
RuntimeConditions.ExceptionRaisedInSkimmerCanAnalyze,
analyzeOptions: options
);
}
[Fact]
public void ExceptionRaisedInvokingAnalyze()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.InvokingAnalyze,
RuntimeConditions.ExceptionInSkimmerAnalyze,
analyzeOptions: options
);
}
[Fact]
public void ExceptionRaisedInEngine()
{
TestAnalyzeCommand.RaiseUnhandledExceptionInDriverCode = true;
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.ExceptionInEngine,
ExitReason.UnhandledExceptionInEngine,
            analyzeOptions: options);
TestAnalyzeCommand.RaiseUnhandledExceptionInDriverCode = false;
}
[Fact]
public void IOExceptionRaisedCreatingSarifLog()
{
string path = Path.GetTempFileName();
try
{
using (var stream = File.OpenWrite(path))
{
// our log file is locked for write
// causing exceptions at analysis time
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
OutputFilePath = path,
Verbose = true,
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.ExceptionCreatingLogfile,
expectedExitReason: ExitReason.ExceptionCreatingLogFile,
analyzeOptions: options);
}
}
finally
{
File.Delete(path);
}
}
[Fact]
public void UnauthorizedAccessExceptionCreatingSarifLog()
{
string path = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData);
path = Path.Combine(path, Guid.NewGuid().ToString());
using (var stream = File.Create(path, 1, FileOptions.DeleteOnClose))
{
// attempt to persist to unauthorized location will raise exception
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
OutputFilePath = path,
Verbose = true,
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.ExceptionCreatingLogfile,
expectedExitReason: ExitReason.ExceptionCreatingLogFile,
analyzeOptions: options);
}
}
[Fact]
public void MissingConfigurationFile()
{
string path = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData);
path = Path.Combine(path, Guid.NewGuid().ToString());
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
ConfigurationFilePath = path,
Verbose = true,
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.MissingFile,
expectedExitReason: ExitReason.InvalidCommandLineOption,
analyzeOptions: options);
}
[Fact]
public void MissingPlugInFile()
{
string path = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData);
path = Path.Combine(path, Guid.NewGuid().ToString());
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
PlugInFilePaths = new string[] { path },
Verbose = true,
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.MissingFile,
expectedExitReason: ExitReason.InvalidCommandLineOption,
analyzeOptions: options);
}
[Fact]
public void MissingOutputFile()
{
string path = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData);
path = Path.Combine(path, Guid.NewGuid().ToString());
        try
        {
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
OutputFilePath = path,
Verbose = true,
};
// A missing output file is a good condition. :)
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.NoErrors,
expectedExitReason: ExitReason.None,
analyzeOptions: options);
}
finally
{
if (File.Exists(path)) { File.Delete(path); }
}
}
public Run AnalyzeFile(string fileName)
{
string path = Path.GetTempFileName();
Run run = null;
try
{
var options = new TestAnalyzeOptions
{
TargetFileSpecifiers = new string[] { fileName },
Verbose = true,
Statistics = true,
ComputeTargetsHash = true,
ConfigurationFilePath = TestAnalyzeCommand.DEFAULT_POLICY_NAME,
Recurse = true,
OutputFilePath = path,
};
var command = new TestAnalyzeCommand();
command.DefaultPlugInAssemblies = new Assembly[] { this.GetType().Assembly };
int result = command.Run(options);
Assert.Equal(TestAnalyzeCommand.SUCCESS, result);
JsonSerializerSettings settings = new JsonSerializerSettings()
{
ContractResolver = SarifContractResolver.Instance
};
SarifLog log = JsonConvert.DeserializeObject<SarifLog>(File.ReadAllText(path), settings);
Assert.NotNull(log);
Assert.Equal<int>(1, log.Runs.Count);
run = log.Runs[0];
}
finally
{
File.Delete(path);
}
return run;
}
[Fact]
public void AnalyzeCommand_EndToEndAnalysisWithNoIssues()
{
Run run = AnalyzeFile(this.GetType().Assembly.Location);
int resultCount = 0;
SarifHelpers.ValidateRun(run, (issue) => { resultCount++; });
Assert.Equal(1, resultCount);
}
}
} | 1 | 10,560 | The namespaces in this file need sorting. | microsoft-sarif-sdk | .cs
@@ -942,7 +942,7 @@ func ConfigureDefaultMTUs(hostMTU int, c *Config) {
c.VXLANMTU = hostMTU - vxlanMTUOverhead
}
if c.Wireguard.MTU == 0 {
- if c.KubernetesProvider == config.ProviderAKS && c.RouteSource == "WorkloadIPs" {
+ if c.Wireguard.EncryptHostTraffic {
// The default MTU on Azure is 1500, but the underlying network stack will fragment packets at 1400 bytes,
// see https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu
// for details. | 1 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intdataplane
import (
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"k8s.io/client-go/kubernetes"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/arp"
"github.com/projectcalico/felix/bpf/conntrack"
"github.com/projectcalico/felix/bpf/failsafes"
bpfipsets "github.com/projectcalico/felix/bpf/ipsets"
"github.com/projectcalico/felix/bpf/nat"
bpfproxy "github.com/projectcalico/felix/bpf/proxy"
"github.com/projectcalico/felix/bpf/routes"
"github.com/projectcalico/felix/bpf/state"
"github.com/projectcalico/felix/bpf/tc"
"github.com/projectcalico/felix/config"
"github.com/projectcalico/felix/idalloc"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/jitter"
"github.com/projectcalico/felix/labelindex"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/routetable"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/felix/throttle"
"github.com/projectcalico/felix/wireguard"
"github.com/projectcalico/libcalico-go/lib/health"
lclogutils "github.com/projectcalico/libcalico-go/lib/logutils"
cprometheus "github.com/projectcalico/libcalico-go/lib/prometheus"
"github.com/projectcalico/libcalico-go/lib/set"
)
const (
// msgPeekLimit is the maximum number of messages we'll try to grab from the to-dataplane
// channel before we apply the changes. Higher values allow us to batch up more work on
// the channel for greater throughput when we're under load (at cost of higher latency).
msgPeekLimit = 100
// Interface name used by kube-proxy to bind service ips.
KubeIPVSInterface = "kube-ipvs0"
)
var (
countDataplaneSyncErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_int_dataplane_failures",
Help: "Number of times dataplane updates failed and will be retried.",
})
countMessages = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "felix_int_dataplane_messages",
Help: "Number dataplane messages by type.",
}, []string{"type"})
summaryApplyTime = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_apply_time_seconds",
Help: "Time in seconds that it took to apply a dataplane update.",
})
summaryBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_msg_batch_size",
Help: "Number of messages processed in each batch. Higher values indicate we're " +
"doing more batching to try to keep up.",
})
summaryIfaceBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_iface_msg_batch_size",
Help: "Number of interface state messages processed in each batch. Higher " +
"values indicate we're doing more batching to try to keep up.",
})
summaryAddrBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_addr_msg_batch_size",
Help: "Number of interface address messages processed in each batch. Higher " +
"values indicate we're doing more batching to try to keep up.",
})
processStartTime time.Time
zeroKey = wgtypes.Key{}
)
func init() {
prometheus.MustRegister(countDataplaneSyncErrors)
prometheus.MustRegister(summaryApplyTime)
prometheus.MustRegister(countMessages)
prometheus.MustRegister(summaryBatchSize)
prometheus.MustRegister(summaryIfaceBatchSize)
prometheus.MustRegister(summaryAddrBatchSize)
processStartTime = time.Now()
}
type Config struct {
Hostname string
IPv6Enabled bool
RuleRendererOverride rules.RuleRenderer
IPIPMTU int
VXLANMTU int
VXLANPort int
MaxIPSetSize int
IptablesBackend string
IPSetsRefreshInterval time.Duration
RouteRefreshInterval time.Duration
DeviceRouteSourceAddress net.IP
DeviceRouteProtocol int
RemoveExternalRoutes bool
IptablesRefreshInterval time.Duration
IptablesPostWriteCheckInterval time.Duration
IptablesInsertMode string
IptablesLockFilePath string
IptablesLockTimeout time.Duration
IptablesLockProbeInterval time.Duration
XDPRefreshInterval time.Duration
Wireguard wireguard.Config
NetlinkTimeout time.Duration
RulesConfig rules.Config
IfaceMonitorConfig ifacemonitor.Config
StatusReportingInterval time.Duration
ConfigChangedRestartCallback func()
FatalErrorRestartCallback func(error)
PostInSyncCallback func()
HealthAggregator *health.HealthAggregator
RouteTableManager *idalloc.IndexAllocator
DebugSimulateDataplaneHangAfter time.Duration
ExternalNodesCidrs []string
BPFEnabled bool
BPFDisableUnprivileged bool
BPFKubeProxyIptablesCleanupEnabled bool
BPFLogLevel string
BPFExtToServiceConnmark int
BPFDataIfacePattern *regexp.Regexp
XDPEnabled bool
XDPAllowGeneric bool
BPFConntrackTimeouts conntrack.Timeouts
BPFCgroupV2 string
BPFConnTimeLBEnabled bool
BPFMapRepin bool
BPFNodePortDSREnabled bool
KubeProxyMinSyncPeriod time.Duration
KubeProxyEndpointSlicesEnabled bool
SidecarAccelerationEnabled bool
LookPathOverride func(file string) (string, error)
KubeClientSet *kubernetes.Clientset
FeatureDetectOverrides map[string]string
// Populated with the smallest host MTU based on auto-detection.
hostMTU int
MTUIfacePattern *regexp.Regexp
RouteSource string
KubernetesProvider config.Provider
}
type UpdateBatchResolver interface {
// Opportunity for a manager component to resolve state that depends jointly on the updates
// that it has seen since the preceding CompleteDeferredWork call. Processing here can
// include passing resolved state to other managers. It should not include any actual
// dataplane updates yet. (Those should be actioned in CompleteDeferredWork.)
ResolveUpdateBatch() error
}
// InternalDataplane implements an in-process Felix dataplane driver based on iptables
// and ipsets. It communicates with the datastore-facing part of Felix via the
// Send/RecvMessage methods, which operate on the protobuf-defined API objects.
//
// Architecture
//
// The internal dataplane driver is organised around a main event loop, which handles
// update events from the datastore and dataplane.
//
// Each pass around the main loop has two phases. In the first phase, updates are fanned
// out to "manager" objects, which calculate the changes that are needed and pass them to
// the dataplane programming layer. In the second phase, the dataplane layer applies the
// updates in a consistent sequence. The second phase is skipped until the datastore is
// in sync; this ensures that the first update to the dataplane applies a consistent
// snapshot.
//
// Having the dataplane layer batch updates has several advantages. It is much more
// efficient to batch updates, since each call to iptables/ipsets has a high fixed cost.
// In addition, it allows for different managers to make updates without having to
// coordinate on their sequencing.
//
// Requirements on the API
//
// The internal dataplane does not do consistency checks on the incoming data (as the
// old Python-based driver used to do). It expects to be told about dependent resources
// before they are needed and for their lifetime to exceed that of the resources that
// depend on them. For example, it is important that the datastore layer sends an
// IP set create event before it sends a rule that references that IP set.
type InternalDataplane struct {
toDataplane chan interface{}
fromDataplane chan interface{}
allIptablesTables []*iptables.Table
iptablesMangleTables []*iptables.Table
iptablesNATTables []*iptables.Table
iptablesRawTables []*iptables.Table
iptablesFilterTables []*iptables.Table
ipSets []ipsetsDataplane
ipipManager *ipipManager
wireguardManager *wireguardManager
ifaceMonitor *ifacemonitor.InterfaceMonitor
ifaceUpdates chan *ifaceUpdate
ifaceAddrUpdates chan *ifaceAddrsUpdate
endpointStatusCombiner *endpointStatusCombiner
allManagers []Manager
managersWithRouteTables []ManagerWithRouteTables
ruleRenderer rules.RuleRenderer
// dataplaneNeedsSync is set if the dataplane is dirty in some way, i.e. we need to
// call apply().
dataplaneNeedsSync bool
// forceIPSetsRefresh is set by the IP sets refresh timer to indicate that we should
// check the IP sets in the dataplane.
forceIPSetsRefresh bool
// forceRouteRefresh is set by the route refresh timer to indicate that we should
// check the routes in the dataplane.
forceRouteRefresh bool
// forceXDPRefresh is set by the XDP refresh timer to indicate that we should
// check the XDP state in the dataplane.
forceXDPRefresh bool
// doneFirstApply is set after we finish the first update to the dataplane. It indicates
// that the dataplane should now be in sync.
doneFirstApply bool
reschedTimer *time.Timer
reschedC <-chan time.Time
applyThrottle *throttle.Throttle
config Config
debugHangC <-chan time.Time
xdpState *xdpState
sockmapState *sockmapState
endpointsSourceV4 endpointsSource
ipsetsSourceV4 ipsetsSource
callbacks *callbacks
loopSummarizer *logutils.Summarizer
}
const (
healthName = "int_dataplane"
healthInterval = 10 * time.Second
ipipMTUOverhead = 20
vxlanMTUOverhead = 50
wireguardMTUOverhead = 60
aksMTUOverhead = 100
)
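
// The encap overheads above match the extra header bytes each tunnel adds: 20 for
// the outer IPv4 header (IPIP), 50 for VXLAN (outer IP/UDP/VXLAN plus the inner
// Ethernet header), 60 for wireguard, and an extra 100-byte allowance on AKS,
// whose underlying network fragments packets at 1400 bytes (see
// ConfigureDefaultMTUs below).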
func NewIntDataplaneDriver(config Config) *InternalDataplane {
log.WithField("config", config).Info("Creating internal dataplane driver.")
ruleRenderer := config.RuleRendererOverride
if ruleRenderer == nil {
ruleRenderer = rules.NewRenderer(config.RulesConfig)
}
epMarkMapper := rules.NewEndpointMarkMapper(
config.RulesConfig.IptablesMarkEndpoint,
config.RulesConfig.IptablesMarkNonCaliEndpoint)
// Auto-detect host MTU.
hostMTU, err := findHostMTU(config.MTUIfacePattern)
if err != nil {
log.WithError(err).Fatal("Unable to detect host MTU, shutting down")
return nil
}
ConfigureDefaultMTUs(hostMTU, &config)
podMTU := determinePodMTU(config)
if err := writeMTUFile(podMTU); err != nil {
log.WithError(err).Error("Failed to write MTU file, pod MTU may not be properly set")
}
dp := &InternalDataplane{
toDataplane: make(chan interface{}, msgPeekLimit),
fromDataplane: make(chan interface{}, 100),
ruleRenderer: ruleRenderer,
ifaceMonitor: ifacemonitor.New(config.IfaceMonitorConfig, config.FatalErrorRestartCallback),
ifaceUpdates: make(chan *ifaceUpdate, 100),
ifaceAddrUpdates: make(chan *ifaceAddrsUpdate, 100),
config: config,
applyThrottle: throttle.New(10),
loopSummarizer: logutils.NewSummarizer("dataplane reconciliation loops"),
}
dp.applyThrottle.Refill() // Allow the first apply() immediately.
dp.ifaceMonitor.StateCallback = dp.onIfaceStateChange
dp.ifaceMonitor.AddrCallback = dp.onIfaceAddrsChange
backendMode := iptables.DetectBackend(config.LookPathOverride, iptables.NewRealCmd, config.IptablesBackend)
// Most iptables tables need the same options.
iptablesOptions := iptables.TableOptions{
HistoricChainPrefixes: rules.AllHistoricChainNamePrefixes,
InsertMode: config.IptablesInsertMode,
RefreshInterval: config.IptablesRefreshInterval,
PostWriteInterval: config.IptablesPostWriteCheckInterval,
LockTimeout: config.IptablesLockTimeout,
LockProbeInterval: config.IptablesLockProbeInterval,
BackendMode: backendMode,
LookPathOverride: config.LookPathOverride,
OnStillAlive: dp.reportHealth,
OpRecorder: dp.loopSummarizer,
}
if config.BPFEnabled && config.BPFKubeProxyIptablesCleanupEnabled {
// If BPF-mode is enabled, clean up kube-proxy's rules too.
log.Info("BPF enabled, configuring iptables layer to clean up kube-proxy's rules.")
iptablesOptions.ExtraCleanupRegexPattern = rules.KubeProxyInsertRuleRegex
iptablesOptions.HistoricChainPrefixes = append(iptablesOptions.HistoricChainPrefixes, rules.KubeProxyChainPrefixes...)
}
// However, the NAT tables need an extra cleanup regex.
iptablesNATOptions := iptablesOptions
if iptablesNATOptions.ExtraCleanupRegexPattern == "" {
iptablesNATOptions.ExtraCleanupRegexPattern = rules.HistoricInsertedNATRuleRegex
} else {
iptablesNATOptions.ExtraCleanupRegexPattern += "|" + rules.HistoricInsertedNATRuleRegex
}
featureDetector := iptables.NewFeatureDetector(config.FeatureDetectOverrides)
iptablesFeatures := featureDetector.GetFeatures()
var iptablesLock sync.Locker
if iptablesFeatures.RestoreSupportsLock {
log.Debug("Calico implementation of iptables lock disabled (because detected version of " +
"iptables-restore will use its own implementation).")
iptablesLock = dummyLock{}
} else if config.IptablesLockTimeout <= 0 {
log.Debug("Calico implementation of iptables lock disabled (by configuration).")
iptablesLock = dummyLock{}
} else {
// Create the shared iptables lock. This allows us to block other processes from
// manipulating iptables while we make our updates. We use a shared lock because we
// actually do multiple updates in parallel (but to different tables), which is safe.
log.WithField("timeout", config.IptablesLockTimeout).Debug(
"Calico implementation of iptables lock enabled")
iptablesLock = iptables.NewSharedLock(
config.IptablesLockFilePath,
config.IptablesLockTimeout,
config.IptablesLockProbeInterval,
)
}
mangleTableV4 := iptables.NewTable(
"mangle",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
natTableV4 := iptables.NewTable(
"nat",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesNATOptions,
)
rawTableV4 := iptables.NewTable(
"raw",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
filterTableV4 := iptables.NewTable(
"filter",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
ipSetsConfigV4 := config.RulesConfig.IPSetConfigV4
ipSetsV4 := ipsets.NewIPSets(ipSetsConfigV4, dp.loopSummarizer)
dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV4)
dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV4)
dp.iptablesMangleTables = append(dp.iptablesMangleTables, mangleTableV4)
dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV4)
dp.ipSets = append(dp.ipSets, ipSetsV4)
if config.RulesConfig.VXLANEnabled {
routeTableVXLAN := routetable.New([]string{"^vxlan.calico$"}, 4, true, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, true, 0,
dp.loopSummarizer)
vxlanManager := newVXLANManager(
ipSetsV4,
routeTableVXLAN,
"vxlan.calico",
config,
dp.loopSummarizer,
)
go vxlanManager.KeepVXLANDeviceInSync(config.VXLANMTU, iptablesFeatures.ChecksumOffloadBroken, 10*time.Second)
dp.RegisterManager(vxlanManager)
} else {
cleanUpVXLANDevice()
}
dp.endpointStatusCombiner = newEndpointStatusCombiner(dp.fromDataplane, config.IPv6Enabled)
callbacks := newCallbacks()
dp.callbacks = callbacks
if !config.BPFEnabled && config.XDPEnabled {
if err := bpf.SupportsXDP(); err != nil {
log.WithError(err).Warn("Can't enable XDP acceleration.")
} else {
st, err := NewXDPState(config.XDPAllowGeneric)
if err != nil {
log.WithError(err).Warn("Can't enable XDP acceleration.")
} else {
dp.xdpState = st
dp.xdpState.PopulateCallbacks(callbacks)
dp.RegisterManager(st)
log.Info("XDP acceleration enabled.")
}
}
} else {
log.Info("XDP acceleration disabled.")
}
// TODO Integrate XDP and BPF infra.
if !config.BPFEnabled && dp.xdpState == nil {
xdpState, err := NewXDPState(config.XDPAllowGeneric)
if err == nil {
if err := xdpState.WipeXDP(); err != nil {
log.WithError(err).Warn("Failed to cleanup preexisting XDP state")
}
}
// if we can't create an XDP state it means we couldn't get a working
// bpffs so there's nothing to clean up
}
if config.SidecarAccelerationEnabled {
if err := bpf.SupportsSockmap(); err != nil {
log.WithError(err).Warn("Can't enable Sockmap acceleration.")
} else {
st, err := NewSockmapState()
if err != nil {
log.WithError(err).Warn("Can't enable Sockmap acceleration.")
} else {
dp.sockmapState = st
dp.sockmapState.PopulateCallbacks(callbacks)
if err := dp.sockmapState.SetupSockmapAcceleration(); err != nil {
dp.sockmapState = nil
log.WithError(err).Warn("Failed to set up Sockmap acceleration")
} else {
log.Info("Sockmap acceleration enabled.")
}
}
}
}
if dp.sockmapState == nil {
st, err := NewSockmapState()
if err == nil {
st.WipeSockmap(bpf.FindInBPFFSOnly)
}
// if we can't create a sockmap state it means we couldn't get a working
// bpffs so there's nothing to clean up
}
if !config.BPFEnabled {
// BPF mode disabled, create the iptables-only managers.
ipsetsManager := newIPSetsManager(ipSetsV4, config.MaxIPSetSize)
dp.RegisterManager(ipsetsManager)
dp.ipsetsSourceV4 = ipsetsManager
// TODO Connect host IP manager to BPF
dp.RegisterManager(newHostIPManager(
config.RulesConfig.WorkloadIfacePrefixes,
rules.IPSetIDThisHostIPs,
ipSetsV4,
config.MaxIPSetSize))
dp.RegisterManager(newPolicyManager(rawTableV4, mangleTableV4, filterTableV4, ruleRenderer, 4))
// Clean up any leftover BPF state.
err := nat.RemoveConnectTimeLoadBalancer("")
if err != nil {
log.WithError(err).Info("Failed to remove BPF connect-time load balancer, ignoring.")
}
tc.CleanUpProgramsAndPins()
}
interfaceRegexes := make([]string, len(config.RulesConfig.WorkloadIfacePrefixes))
for i, r := range config.RulesConfig.WorkloadIfacePrefixes {
interfaceRegexes[i] = "^" + r + ".*"
}
bpfMapContext := &bpf.MapContext{
RepinningEnabled: config.BPFMapRepin,
}
var (
bpfEndpointManager *bpfEndpointManager
)
if config.BPFEnabled {
log.Info("BPF enabled, starting BPF endpoint manager and map manager.")
// Register map managers first since they create the maps that will be used by the endpoint manager.
		// It's important that we create the maps before we load a BPF program with TC, since we set the
		// map's metadata name whereas TC doesn't set that field.
ipSetIDAllocator := idalloc.New()
ipSetsMap := bpfipsets.Map(bpfMapContext)
err := ipSetsMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create ipsets BPF map.")
}
ipSetsV4 := bpfipsets.NewBPFIPSets(
ipSetsConfigV4,
ipSetIDAllocator,
ipSetsMap,
dp.loopSummarizer,
)
dp.ipSets = append(dp.ipSets, ipSetsV4)
dp.RegisterManager(newIPSetsManager(ipSetsV4, config.MaxIPSetSize))
bpfRTMgr := newBPFRouteManager(config.Hostname, config.ExternalNodesCidrs, bpfMapContext, dp.loopSummarizer)
dp.RegisterManager(bpfRTMgr)
// Forwarding into an IPIP tunnel fails silently because IPIP tunnels are L3 devices and support for
// L3 devices in BPF is not available yet. Disable the FIB lookup in that case.
fibLookupEnabled := !config.RulesConfig.IPIPEnabled
stateMap := state.Map(bpfMapContext)
err = stateMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create state BPF map.")
}
arpMap := arp.Map(bpfMapContext)
err = arpMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create ARP BPF map.")
}
// The failsafe manager sets up the failsafe port map. It's important that it is registered before the
// endpoint managers so that the map is brought up to date before they run for the first time.
failsafesMap := failsafes.Map(bpfMapContext)
err = failsafesMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create failsafe port BPF map.")
}
failsafeMgr := failsafes.NewManager(
failsafesMap,
config.RulesConfig.FailsafeInboundHostPorts,
config.RulesConfig.FailsafeOutboundHostPorts,
dp.loopSummarizer,
)
dp.RegisterManager(failsafeMgr)
workloadIfaceRegex := regexp.MustCompile(strings.Join(interfaceRegexes, "|"))
bpfEndpointManager = newBPFEndpointManager(
config.BPFLogLevel,
config.Hostname,
fibLookupEnabled,
config.RulesConfig.EndpointToHostAction,
config.BPFDataIfacePattern,
workloadIfaceRegex,
ipSetIDAllocator,
config.VXLANMTU,
uint16(config.VXLANPort),
config.BPFNodePortDSREnabled,
config.BPFExtToServiceConnmark,
ipSetsMap,
stateMap,
ruleRenderer,
filterTableV4,
dp.reportHealth,
dp.loopSummarizer,
)
dp.RegisterManager(bpfEndpointManager)
// Pre-create the NAT maps so that later operations can assume access.
frontendMap := nat.FrontendMap(bpfMapContext)
err = frontendMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT frontend BPF map.")
}
backendMap := nat.BackendMap(bpfMapContext)
err = backendMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT backend BPF map.")
}
backendAffinityMap := nat.AffinityMap(bpfMapContext)
err = backendAffinityMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT backend affinity BPF map.")
}
routeMap := routes.Map(bpfMapContext)
err = routeMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create routes BPF map.")
}
ctMap := conntrack.Map(bpfMapContext)
err = ctMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create conntrack BPF map.")
}
conntrackScanner := conntrack.NewScanner(ctMap,
conntrack.NewLivenessScanner(config.BPFConntrackTimeouts, config.BPFNodePortDSREnabled))
// Before we start, scan for all finished / timed out connections to
// free up the conntrack table asap as it may take time to sync up the
// proxy and kick off the first full cleaner scan.
conntrackScanner.Scan()
bpfproxyOpts := []bpfproxy.Option{
bpfproxy.WithMinSyncPeriod(config.KubeProxyMinSyncPeriod),
}
if config.KubeProxyEndpointSlicesEnabled {
bpfproxyOpts = append(bpfproxyOpts, bpfproxy.WithEndpointsSlices())
}
if config.BPFNodePortDSREnabled {
bpfproxyOpts = append(bpfproxyOpts, bpfproxy.WithDSREnabled())
}
if config.KubeClientSet != nil {
// We have a Kubernetes connection, start watching services and populating the NAT maps.
kp, err := bpfproxy.StartKubeProxy(
config.KubeClientSet,
config.Hostname,
frontendMap,
backendMap,
backendAffinityMap,
ctMap,
bpfproxyOpts...,
)
if err != nil {
log.WithError(err).Panic("Failed to start kube-proxy.")
}
bpfRTMgr.setHostIPUpdatesCallBack(kp.OnHostIPsUpdate)
bpfRTMgr.setRoutesCallBacks(kp.OnRouteUpdate, kp.OnRouteDelete)
conntrackScanner.AddUnlocked(conntrack.NewStaleNATScanner(kp))
conntrackScanner.Start()
} else {
log.Info("BPF enabled but no Kubernetes client available, unable to run kube-proxy module.")
}
if config.BPFConnTimeLBEnabled {
// Activate the connect-time load balancer.
err = nat.InstallConnectTimeLoadBalancer(frontendMap, backendMap, routeMap, config.BPFCgroupV2, config.BPFLogLevel)
if err != nil {
log.WithError(err).Panic("BPFConnTimeLBEnabled but failed to attach connect-time load balancer, bailing out.")
}
} else {
// Deactivate the connect-time load balancer.
err = nat.RemoveConnectTimeLoadBalancer(config.BPFCgroupV2)
if err != nil {
log.WithError(err).Warn("Failed to detach connect-time load balancer. Ignoring.")
}
}
}
routeTableV4 := routetable.New(interfaceRegexes, 4, false, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, config.RemoveExternalRoutes, 0,
dp.loopSummarizer)
epManager := newEndpointManager(
rawTableV4,
mangleTableV4,
filterTableV4,
ruleRenderer,
routeTableV4,
4,
epMarkMapper,
config.RulesConfig.KubeIPVSSupportEnabled,
config.RulesConfig.WorkloadIfacePrefixes,
dp.endpointStatusCombiner.OnEndpointStatusUpdate,
config.BPFEnabled,
bpfEndpointManager,
callbacks)
dp.RegisterManager(epManager)
dp.endpointsSourceV4 = epManager
dp.RegisterManager(newFloatingIPManager(natTableV4, ruleRenderer, 4))
dp.RegisterManager(newMasqManager(ipSetsV4, natTableV4, ruleRenderer, config.MaxIPSetSize, 4))
if config.RulesConfig.IPIPEnabled {
		// Add a manager to keep the all-hosts IP set up to date.
dp.ipipManager = newIPIPManager(ipSetsV4, config.MaxIPSetSize, config.ExternalNodesCidrs)
dp.RegisterManager(dp.ipipManager) // IPv4-only
}
// Add a manager for wireguard configuration. This is added irrespective of whether wireguard is actually enabled
// because it may need to tidy up some of the routing rules when disabled.
cryptoRouteTableWireguard := wireguard.New(config.Hostname, &config.Wireguard, config.NetlinkTimeout,
config.DeviceRouteProtocol, func(publicKey wgtypes.Key) error {
if publicKey == zeroKey {
dp.fromDataplane <- &proto.WireguardStatusUpdate{PublicKey: ""}
} else {
dp.fromDataplane <- &proto.WireguardStatusUpdate{PublicKey: publicKey.String()}
}
return nil
},
dp.loopSummarizer)
dp.wireguardManager = newWireguardManager(cryptoRouteTableWireguard, config)
dp.RegisterManager(dp.wireguardManager) // IPv4-only
dp.RegisterManager(newServiceLoopManager(filterTableV4, ruleRenderer, 4))
if config.IPv6Enabled {
mangleTableV6 := iptables.NewTable(
"mangle",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
natTableV6 := iptables.NewTable(
"nat",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesNATOptions,
)
rawTableV6 := iptables.NewTable(
"raw",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
filterTableV6 := iptables.NewTable(
"filter",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
ipSetsConfigV6 := config.RulesConfig.IPSetConfigV6
ipSetsV6 := ipsets.NewIPSets(ipSetsConfigV6, dp.loopSummarizer)
dp.ipSets = append(dp.ipSets, ipSetsV6)
dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV6)
dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV6)
dp.iptablesMangleTables = append(dp.iptablesMangleTables, mangleTableV6)
dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV6)
routeTableV6 := routetable.New(
interfaceRegexes, 6, false, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, config.RemoveExternalRoutes, 0,
dp.loopSummarizer)
if !config.BPFEnabled {
dp.RegisterManager(newIPSetsManager(ipSetsV6, config.MaxIPSetSize))
dp.RegisterManager(newHostIPManager(
config.RulesConfig.WorkloadIfacePrefixes,
rules.IPSetIDThisHostIPs,
ipSetsV6,
config.MaxIPSetSize))
dp.RegisterManager(newPolicyManager(rawTableV6, mangleTableV6, filterTableV6, ruleRenderer, 6))
}
dp.RegisterManager(newEndpointManager(
rawTableV6,
mangleTableV6,
filterTableV6,
ruleRenderer,
routeTableV6,
6,
epMarkMapper,
config.RulesConfig.KubeIPVSSupportEnabled,
config.RulesConfig.WorkloadIfacePrefixes,
dp.endpointStatusCombiner.OnEndpointStatusUpdate,
config.BPFEnabled,
nil,
callbacks))
dp.RegisterManager(newFloatingIPManager(natTableV6, ruleRenderer, 6))
dp.RegisterManager(newMasqManager(ipSetsV6, natTableV6, ruleRenderer, config.MaxIPSetSize, 6))
dp.RegisterManager(newServiceLoopManager(filterTableV6, ruleRenderer, 6))
}
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesMangleTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesNATTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesFilterTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesRawTables...)
// Register that we will report liveness and readiness.
if config.HealthAggregator != nil {
log.Info("Registering to report health.")
config.HealthAggregator.RegisterReporter(
healthName,
&health.HealthReport{Live: true, Ready: true},
healthInterval*2,
)
}
if config.DebugSimulateDataplaneHangAfter != 0 {
log.WithField("delay", config.DebugSimulateDataplaneHangAfter).Warn(
"Simulating a dataplane hang.")
dp.debugHangC = time.After(config.DebugSimulateDataplaneHangAfter)
}
return dp
}
// findHostMTU auto-detects the smallest host interface MTU.
func findHostMTU(matchRegex *regexp.Regexp) (int, error) {
// Find all the interfaces on the host.
links, err := netlink.LinkList()
if err != nil {
log.WithError(err).Error("Failed to list interfaces. Unable to auto-detect MTU")
return 0, err
}
// Iterate through them, keeping track of the lowest MTU.
smallest := 0
for _, l := range links {
// Skip links that we know are not external interfaces.
fields := log.Fields{"mtu": l.Attrs().MTU, "name": l.Attrs().Name}
if matchRegex == nil || !matchRegex.MatchString(l.Attrs().Name) {
log.WithFields(fields).Debug("Skipping interface for MTU detection")
continue
}
log.WithFields(fields).Debug("Examining link for MTU calculation")
if l.Attrs().MTU < smallest || smallest == 0 {
smallest = l.Attrs().MTU
}
}
if smallest == 0 {
// We failed to find a usable interface. Default the MTU of the host
// to 1460 - the smallest among common cloud providers.
log.Warn("Failed to auto-detect host MTU - no interfaces matched the MTU interface pattern. To use auto-MTU, set mtuIfacePattern to match your host's interfaces")
return 1460, nil
}
return smallest, nil
}
// writeMTUFile writes the smallest MTU among enabled encapsulation types to disk
// for use by other components (e.g., CNI plugin).
func writeMTUFile(mtu int) error {
// Make sure directory exists.
if err := os.MkdirAll("/var/lib/calico", os.ModePerm); err != nil {
return fmt.Errorf("failed to create directory /var/lib/calico: %s", err)
}
// Write the smallest MTU to disk so other components can rely on this calculation consistently.
filename := "/var/lib/calico/mtu"
log.Debugf("Writing %d to "+filename, mtu)
if err := ioutil.WriteFile(filename, []byte(fmt.Sprintf("%d", mtu)), 0644); err != nil {
log.WithError(err).Error("Unable to write to " + filename)
return err
}
return nil
}
// determinePodMTU looks at the configured MTUs and enabled encapsulations to determine which
// value for MTU should be used for pod interfaces.
func determinePodMTU(config Config) int {
// Determine the smallest MTU among enabled encap methods. If none of the encap methods are
// enabled, we'll just use the host's MTU.
mtu := 0
type mtuState struct {
mtu int
enabled bool
}
for _, s := range []mtuState{
{config.IPIPMTU, config.RulesConfig.IPIPEnabled},
{config.VXLANMTU, config.RulesConfig.VXLANEnabled},
{config.Wireguard.MTU, config.Wireguard.Enabled},
} {
if s.enabled && s.mtu != 0 && (s.mtu < mtu || mtu == 0) {
mtu = s.mtu
}
}
if mtu == 0 {
// No enabled encapsulation. Just use the host MTU.
mtu = config.hostMTU
} else if mtu > config.hostMTU {
fields := logrus.Fields{"mtu": mtu, "hostMTU": config.hostMTU}
log.WithFields(fields).Warn("Configured MTU is larger than detected host interface MTU")
}
log.WithField("mtu", mtu).Info("Determined pod MTU")
return mtu
}
// ConfigureDefaultMTUs defaults any MTU configurations that have not been set.
// We default the values even if the encap is not enabled, in order to match behavior from earlier versions of Calico.
// However, the MTU will only be considered for allocation to pod interfaces if the encap is enabled.
func ConfigureDefaultMTUs(hostMTU int, c *Config) {
c.hostMTU = hostMTU
if c.IPIPMTU == 0 {
log.Debug("Defaulting IPIP MTU based on host")
c.IPIPMTU = hostMTU - ipipMTUOverhead
}
if c.VXLANMTU == 0 {
log.Debug("Defaulting VXLAN MTU based on host")
c.VXLANMTU = hostMTU - vxlanMTUOverhead
}
if c.Wireguard.MTU == 0 {
if c.KubernetesProvider == config.ProviderAKS && c.RouteSource == "WorkloadIPs" {
// The default MTU on Azure is 1500, but the underlying network stack will fragment packets at 1400 bytes,
// see https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu
// for details.
// Additionally, Wireguard sets the DF bit on its packets, and so if the MTU is set too high large packets
// will be dropped. Therefore it is necessary to allow for the difference between the MTU of the host and
// the underlying network.
log.Debug("Defaulting Wireguard MTU based on host and AKS with WorkloadIPs")
c.Wireguard.MTU = hostMTU - aksMTUOverhead - wireguardMTUOverhead
} else {
log.Debug("Defaulting Wireguard MTU based on host")
c.Wireguard.MTU = hostMTU - wireguardMTUOverhead
}
}
}
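
// As a worked example (illustrative only, not part of the original file): with a
// detected host MTU of 1500 and no explicit overrides, the defaults above come
// out to IPIPMTU = 1480, VXLANMTU = 1450 and, outside the AKS WorkloadIPs case,
// Wireguard.MTU = 1440; on AKS with WorkloadIPs it drops to 1340.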
func cleanUpVXLANDevice() {
// If VXLAN is not enabled, check to see if there is a VXLAN device and delete it if there is.
log.Debug("Checking if we need to clean up the VXLAN device")
link, err := netlink.LinkByName("vxlan.calico")
if err != nil {
if _, ok := err.(netlink.LinkNotFoundError); ok {
log.Debug("VXLAN disabled and no VXLAN device found")
return
}
log.WithError(err).Warnf("VXLAN disabled and failed to query VXLAN device. Ignoring.")
return
}
if err = netlink.LinkDel(link); err != nil {
log.WithError(err).Error("VXLAN disabled and failed to delete unwanted VXLAN device. Ignoring.")
}
}
type Manager interface {
// OnUpdate is called for each protobuf message from the datastore. May either directly
// send updates to the IPSets and iptables.Table objects (which will queue the updates
// until the main loop instructs them to act) or (for efficiency) may wait until
// a call to CompleteDeferredWork() to flush updates to the dataplane.
OnUpdate(protoBufMsg interface{})
// Called before the main loop flushes updates to the dataplane to allow for batched
// work to be completed.
CompleteDeferredWork() error
}
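
// The following type is a hypothetical, minimal illustration of the Manager
// contract above (it is not part of the original file): updates are queued in
// OnUpdate and only flushed when the main loop calls CompleteDeferredWork.
type exampleBatchingManager struct {
	pending []interface{}
}

func (m *exampleBatchingManager) OnUpdate(protoBufMsg interface{}) {
	// Queue the update; the main loop decides when the batch is acted on.
	m.pending = append(m.pending, protoBufMsg)
}

func (m *exampleBatchingManager) CompleteDeferredWork() error {
	// A real manager would flush its batched updates to the dataplane here.
	m.pending = nil
	return nil
}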
type ManagerWithRouteTables interface {
Manager
GetRouteTableSyncers() []routeTableSyncer
}
func (d *InternalDataplane) routeTableSyncers() []routeTableSyncer {
var rts []routeTableSyncer
for _, mrts := range d.managersWithRouteTables {
rts = append(rts, mrts.GetRouteTableSyncers()...)
}
return rts
}
func (d *InternalDataplane) RegisterManager(mgr Manager) {
switch mgr := mgr.(type) {
case ManagerWithRouteTables:
		// We used to log the whole manager out here, but doing so causes races if the
		// manager has other threads or locks.
log.WithField("manager", reflect.TypeOf(mgr).Name()).Debug("registering ManagerWithRouteTables")
d.managersWithRouteTables = append(d.managersWithRouteTables, mgr)
}
d.allManagers = append(d.allManagers, mgr)
}
func (d *InternalDataplane) Start() {
// Do our start-of-day configuration.
d.doStaticDataplaneConfig()
// Then, start the worker threads.
go d.loopUpdatingDataplane()
go d.loopReportingStatus()
go d.ifaceMonitor.MonitorInterfaces()
go d.monitorHostMTU()
}
// onIfaceStateChange is our interface monitor callback. It gets called from the monitor's thread.
func (d *InternalDataplane) onIfaceStateChange(ifaceName string, state ifacemonitor.State, ifIndex int) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"ifIndex": ifIndex,
"state": state,
}).Info("Linux interface state changed.")
d.ifaceUpdates <- &ifaceUpdate{
Name: ifaceName,
State: state,
Index: ifIndex,
}
}
type ifaceUpdate struct {
Name string
State ifacemonitor.State
Index int
}
// Check if the current felix ipvs config is correct when felix gets a kube-ipvs0 interface update.
// If KubeIPVSInterface is UP and felix ipvs support is disabled (kube-proxy switched from iptables to ipvs mode),
// or if KubeIPVSInterface is DOWN and felix ipvs support is enabled (kube-proxy switched from ipvs to iptables mode),
// restart felix to pick up correct ipvs support mode.
func (d *InternalDataplane) checkIPVSConfigOnStateUpdate(state ifacemonitor.State) {
if (!d.config.RulesConfig.KubeIPVSSupportEnabled && state == ifacemonitor.StateUp) ||
(d.config.RulesConfig.KubeIPVSSupportEnabled && state == ifacemonitor.StateDown) {
log.WithFields(log.Fields{
"ipvsIfaceState": state,
"ipvsSupport": d.config.RulesConfig.KubeIPVSSupportEnabled,
}).Info("kube-proxy mode changed. Restart felix.")
d.config.ConfigChangedRestartCallback()
}
}
// onIfaceAddrsChange is our interface address monitor callback. It gets called
// from the monitor's thread.
func (d *InternalDataplane) onIfaceAddrsChange(ifaceName string, addrs set.Set) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"addrs": addrs,
}).Info("Linux interface addrs changed.")
d.ifaceAddrUpdates <- &ifaceAddrsUpdate{
Name: ifaceName,
Addrs: addrs,
}
}
type ifaceAddrsUpdate struct {
Name string
Addrs set.Set
}
func (d *InternalDataplane) SendMessage(msg interface{}) error {
d.toDataplane <- msg
return nil
}
func (d *InternalDataplane) RecvMessage() (interface{}, error) {
return <-d.fromDataplane, nil
}
func (d *InternalDataplane) monitorHostMTU() {
for {
mtu, err := findHostMTU(d.config.MTUIfacePattern)
if err != nil {
log.WithError(err).Error("Error detecting host MTU")
} else if d.config.hostMTU != mtu {
// Since log writing is done on a background thread, we set the force-flush flag on this log to ensure
// that all the in-flight logs get written before we exit.
log.WithFields(log.Fields{lclogutils.FieldForceFlush: true}).Info("Host MTU changed")
d.config.ConfigChangedRestartCallback()
}
time.Sleep(30 * time.Second)
}
}
// doStaticDataplaneConfig sets up the kernel and our static iptables chains. Should be called
// once at start of day before starting the main loop. The actual iptables programming is deferred
// to the main loop.
func (d *InternalDataplane) doStaticDataplaneConfig() {
// Check/configure global kernel parameters.
d.configureKernel()
if d.config.BPFEnabled {
d.setUpIptablesBPF()
} else {
d.setUpIptablesNormal()
}
if d.config.RulesConfig.IPIPEnabled {
log.Info("IPIP enabled, starting thread to keep tunnel configuration in sync.")
go d.ipipManager.KeepIPIPDeviceInSync(
d.config.IPIPMTU,
d.config.RulesConfig.IPIPTunnelAddress,
)
} else {
log.Info("IPIP disabled. Not starting tunnel update thread.")
}
}
func (d *InternalDataplane) setUpIptablesBPF() {
rulesConfig := d.config.RulesConfig
for _, t := range d.iptablesFilterTables {
fwdRules := []iptables.Rule{
{
// Bypass is a strong signal from the BPF program, it means that the flow is approved
// by the program at both ingress and egress.
Comment: []string{"Pre-approved by BPF programs."},
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypass, tc.MarkSeenBypassMask),
Action: iptables.AcceptAction{},
},
}
var inputRules, outputRules []iptables.Rule
// Handle packets for flows that pre-date the BPF programs. The BPF program doesn't have any conntrack
// state for these so it allows them to fall through to iptables with a mark set.
inputRules = append(inputRules,
iptables.Rule{
Match: iptables.Match().
MarkMatchesWithMask(tc.MarkSeenFallThrough, tc.MarkSeenFallThroughMask).
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Accept packets from flows that pre-date BPF."},
Action: iptables.AcceptAction{},
},
iptables.Rule{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenFallThrough, tc.MarkSeenFallThroughMask),
Comment: []string{"Drop packets from unknown flows."},
Action: iptables.DropAction{},
},
)
// Mark traffic leaving the host that already has an established linux conntrack entry.
outputRules = append(outputRules,
iptables.Rule{
Match: iptables.Match().
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Mark pre-established host flows."},
Action: iptables.SetMaskedMarkAction{
Mark: tc.MarkLinuxConntrackEstablished,
Mask: tc.MarkLinuxConntrackEstablishedMask,
},
},
)
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
fwdRules = append(fwdRules,
// Drop packets that have come from a workload but have not been through our BPF program.
iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").NotMarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.DropAction{},
Comment: []string{"From workload without BPF seen mark"},
},
)
if rulesConfig.EndpointToHostAction == "ACCEPT" {
// Only need to worry about ACCEPT here. Drop gets compiled into the BPF program and
// RETURN would be a no-op since there's nothing to RETURN from.
inputRules = append(inputRules, iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").MarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.AcceptAction{},
})
}
// Catch any workload to host packets that haven't been through the BPF program.
inputRules = append(inputRules, iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").NotMarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.DropAction{},
})
}
if t.IPVersion == 6 {
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// In BPF mode, we don't support IPv6 yet. Drop it.
fwdRules = append(fwdRules, iptables.Rule{
Match: iptables.Match().OutInterface(prefix + "+"),
Action: iptables.DropAction{},
Comment: []string{"To workload, drop IPv6."},
})
}
} else {
// Let the BPF programs know if Linux conntrack knows about the flow.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Mark pre-established flows."},
Action: iptables.SetMaskedMarkAction{
Mark: tc.MarkLinuxConntrackEstablished,
Mask: tc.MarkLinuxConntrackEstablishedMask,
},
},
)
// The packet may be about to go to a local workload. However, the local workload may not have a BPF
// program attached (yet). To catch that case, we send the packet through a dispatch chain. We only
// add interfaces to the dispatch chain if the BPF program is in place.
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// Make sure iptables rules don't drop packets that we're about to process through BPF.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().OutInterface(prefix + "+"),
Action: iptables.JumpAction{Target: rules.ChainToWorkloadDispatch},
Comment: []string{"To workload, check workload is known."},
},
)
}
// Need a final rule to accept traffic that is from a workload and going somewhere else.
// Otherwise, if iptables has a DROP policy on the forward chain, the packet will get dropped.
// This rule must come after the to-workload jump rules above to ensure that we don't accept too
// early before the destination is checked.
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// Accept traffic from workloads; the rules above have already dropped anything without the BPF seen mark.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().InInterface(prefix + "+"),
Action: iptables.AcceptAction{},
Comment: []string{"To workload, mark has already been verified."},
},
)
}
}
t.InsertOrAppendRules("INPUT", inputRules)
t.InsertOrAppendRules("FORWARD", fwdRules)
t.InsertOrAppendRules("OUTPUT", outputRules)
}
for _, t := range d.iptablesNATTables {
t.UpdateChains(d.ruleRenderer.StaticNATPostroutingChains(t.IPVersion))
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPostrouting},
}})
}
for _, t := range d.iptablesRawTables {
// Do not RPF check what is marked as to be skipped by RPF check.
rpfRules := []iptables.Rule{{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypassSkipRPF, tc.MarkSeenBypassSkipRPFMask),
Action: iptables.ReturnAction{},
}}
// For anything we approved for forward, permit accept_local as it is
// traffic encapped for NodePort, ICMP replies etc. - stuff we trust.
rpfRules = append(rpfRules, iptables.Rule{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypassForward, tc.MarksMask).RPFCheckPassed(true),
Action: iptables.ReturnAction{},
})
// Do the full RPF check and disallow accept_local for anything else.
rpfRules = append(rpfRules, rules.RPFilter(t.IPVersion, tc.MarkSeen, tc.MarkSeenMask,
rulesConfig.OpenStackSpecialCasesEnabled, false)...)
rpfChain := []*iptables.Chain{{
Name: rules.ChainNamePrefix + "RPF",
Rules: rpfRules,
}}
t.UpdateChains(rpfChain)
var rawRules []iptables.Rule
if t.IPVersion == 4 && rulesConfig.WireguardEnabled && len(rulesConfig.WireguardInterfaceName) > 0 &&
rulesConfig.RouteSource == "WorkloadIPs" {
// Set a mark on packets coming from any interface except for lo, wireguard, or pod veths to ensure the RPF
// check allows it.
log.Debug("Adding Wireguard iptables rule chain")
rawRules = append(rawRules, iptables.Rule{
Match: nil,
Action: iptables.JumpAction{Target: rules.ChainSetWireguardIncomingMark},
})
t.UpdateChain(d.ruleRenderer.WireguardIncomingMarkChain())
}
rawRules = append(rawRules, iptables.Rule{
Action: iptables.JumpAction{Target: rpfChain[0].Name},
})
rawChains := []*iptables.Chain{{
Name: rules.ChainRawPrerouting,
Rules: rawRules,
}}
t.UpdateChains(rawChains)
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawPrerouting},
}})
}
if d.config.BPFExtToServiceConnmark != 0 {
mark := uint32(d.config.BPFExtToServiceConnmark)
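// Persist the mark on the conntrack entry so reply packets on the same connection can be matched.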
for _, t := range d.iptablesMangleTables {
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Match: iptables.Match().MarkMatchesWithMask(
tc.MarkSeen|mark,
tc.MarkSeenMask|mark,
),
Comment: []string{"Mark connections with ExtToServiceConnmark"},
Action: iptables.SetConnMarkAction{Mark: mark, Mask: mark},
}})
}
}
}
func (d *InternalDataplane) setUpIptablesNormal() {
for _, t := range d.iptablesRawTables {
rawChains := d.ruleRenderer.StaticRawTableChains(t.IPVersion)
t.UpdateChains(rawChains)
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawPrerouting},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawOutput},
}})
}
for _, t := range d.iptablesFilterTables {
filterChains := d.ruleRenderer.StaticFilterTableChains(t.IPVersion)
t.UpdateChains(filterChains)
t.InsertOrAppendRules("FORWARD", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterForward},
}})
t.InsertOrAppendRules("INPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterInput},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterOutput},
}})
// Include rules which should be appended to the filter table forward chain.
t.AppendRules("FORWARD", d.ruleRenderer.StaticFilterForwardAppendRules())
}
for _, t := range d.iptablesNATTables {
t.UpdateChains(d.ruleRenderer.StaticNATTableChains(t.IPVersion))
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPrerouting},
}})
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPostrouting},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATOutput},
}})
}
for _, t := range d.iptablesMangleTables {
t.UpdateChains(d.ruleRenderer.StaticMangleTableChains(t.IPVersion))
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainManglePrerouting},
}})
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainManglePostrouting},
}})
}
if d.xdpState != nil {
if err := d.setXDPFailsafePorts(); err != nil {
log.Warnf("failed to set XDP failsafe ports, disabling XDP: %v", err)
if err := d.shutdownXDPCompletely(); err != nil {
log.Warnf("failed to disable XDP: %v, will proceed anyway.", err)
}
}
}
}
func stringToProtocol(protocol string) (labelindex.IPSetPortProtocol, error) {
switch protocol {
case "tcp":
return labelindex.ProtocolTCP, nil
case "udp":
return labelindex.ProtocolUDP, nil
case "sctp":
return labelindex.ProtocolSCTP, nil
}
return labelindex.ProtocolNone, fmt.Errorf("unknown protocol %q", protocol)
}
func (d *InternalDataplane) setXDPFailsafePorts() error {
inboundPorts := d.config.RulesConfig.FailsafeInboundHostPorts
if _, err := d.xdpState.common.bpfLib.NewFailsafeMap(); err != nil {
return err
}
for _, p := range inboundPorts {
proto, err := stringToProtocol(p.Protocol)
if err != nil {
return err
}
if err := d.xdpState.common.bpfLib.UpdateFailsafeMap(uint8(proto), p.Port); err != nil {
return err
}
}
log.Infof("Set XDP failsafe ports: %+v", inboundPorts)
return nil
}
// shutdownXDPCompletely attempts to disable XDP state. This could fail in cases where XDP isn't working properly.
func (d *InternalDataplane) shutdownXDPCompletely() error {
if d.xdpState == nil {
return nil
}
if d.callbacks != nil {
d.xdpState.DepopulateCallbacks(d.callbacks)
}
// spend 1 second attempting to wipe XDP, in case of a hiccup.
maxTries := 10
waitInterval := 100 * time.Millisecond
var err error
for i := 0; i < maxTries; i++ {
err = d.xdpState.WipeXDP()
if err == nil {
d.xdpState = nil
return nil
}
log.WithError(err).WithField("try", i).Warn("failed to wipe the XDP state")
time.Sleep(waitInterval)
}
return fmt.Errorf("Failed to wipe the XDP state after %v tries over %v seconds: Error %v", maxTries, waitInterval, err)
}
func (d *InternalDataplane) loopUpdatingDataplane() {
log.Info("Started internal iptables dataplane driver loop")
healthTicks := time.NewTicker(healthInterval).C
d.reportHealth()
// Retry any failed operations every 10s.
retryTicker := time.NewTicker(10 * time.Second)
// If configured, start tickers to refresh the IP sets and routing table entries.
var ipSetsRefreshC <-chan time.Time
if d.config.IPSetsRefreshInterval > 0 {
log.WithField("interval", d.config.IptablesRefreshInterval).Info(
"Will refresh IP sets on timer")
refreshTicker := jitter.NewTicker(
d.config.IPSetsRefreshInterval,
d.config.IPSetsRefreshInterval/10,
)
ipSetsRefreshC = refreshTicker.C
}
var routeRefreshC <-chan time.Time
if d.config.RouteRefreshInterval > 0 {
log.WithField("interval", d.config.RouteRefreshInterval).Info(
"Will refresh routes on timer")
refreshTicker := jitter.NewTicker(
d.config.RouteRefreshInterval,
d.config.RouteRefreshInterval/10,
)
routeRefreshC = refreshTicker.C
}
var xdpRefreshC <-chan time.Time
if d.config.XDPRefreshInterval > 0 && d.xdpState != nil {
log.WithField("interval", d.config.XDPRefreshInterval).Info(
"Will refresh XDP on timer")
refreshTicker := jitter.NewTicker(
d.config.XDPRefreshInterval,
d.config.XDPRefreshInterval/10,
)
xdpRefreshC = refreshTicker.C
}
// Fill the apply throttle leaky bucket.
throttleC := jitter.NewTicker(100*time.Millisecond, 10*time.Millisecond).C
beingThrottled := false
datastoreInSync := false
processMsgFromCalcGraph := func(msg interface{}) {
log.WithField("msg", proto.MsgStringer{Msg: msg}).Infof(
"Received %T update from calculation graph", msg)
d.recordMsgStat(msg)
for _, mgr := range d.allManagers {
mgr.OnUpdate(msg)
}
switch msg.(type) {
case *proto.InSync:
log.WithField("timeSinceStart", time.Since(processStartTime)).Info(
"Datastore in sync, flushing the dataplane for the first time...")
datastoreInSync = true
}
}
processIfaceUpdate := func(ifaceUpdate *ifaceUpdate) {
log.WithField("msg", ifaceUpdate).Info("Received interface update")
if ifaceUpdate.Name == KubeIPVSInterface {
d.checkIPVSConfigOnStateUpdate(ifaceUpdate.State)
return
}
for _, mgr := range d.allManagers {
mgr.OnUpdate(ifaceUpdate)
}
for _, mgr := range d.managersWithRouteTables {
for _, routeTable := range mgr.GetRouteTableSyncers() {
routeTable.OnIfaceStateChanged(ifaceUpdate.Name, ifaceUpdate.State)
}
}
}
processAddrsUpdate := func(ifaceAddrsUpdate *ifaceAddrsUpdate) {
log.WithField("msg", ifaceAddrsUpdate).Info("Received interface addresses update")
for _, mgr := range d.allManagers {
mgr.OnUpdate(ifaceAddrsUpdate)
}
}
for {
select {
case msg := <-d.toDataplane:
// Process the message we received, then opportunistically process any other
// pending messages.
batchSize := 1
processMsgFromCalcGraph(msg)
msgLoop1:
for i := 0; i < msgPeekLimit; i++ {
select {
case msg := <-d.toDataplane:
processMsgFromCalcGraph(msg)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop1
}
}
d.dataplaneNeedsSync = true
summaryBatchSize.Observe(float64(batchSize))
case ifaceUpdate := <-d.ifaceUpdates:
// Process the message we received, then opportunistically process any other
// pending messages.
batchSize := 1
processIfaceUpdate(ifaceUpdate)
msgLoop2:
for i := 0; i < msgPeekLimit; i++ {
select {
case ifaceUpdate := <-d.ifaceUpdates:
processIfaceUpdate(ifaceUpdate)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop2
}
}
d.dataplaneNeedsSync = true
summaryIfaceBatchSize.Observe(float64(batchSize))
case ifaceAddrsUpdate := <-d.ifaceAddrUpdates:
batchSize := 1
processAddrsUpdate(ifaceAddrsUpdate)
msgLoop3:
for i := 0; i < msgPeekLimit; i++ {
select {
case ifaceAddrsUpdate := <-d.ifaceAddrUpdates:
processAddrsUpdate(ifaceAddrsUpdate)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop3
}
}
summaryAddrBatchSize.Observe(float64(batchSize))
d.dataplaneNeedsSync = true
case <-ipSetsRefreshC:
log.Debug("Refreshing IP sets state")
d.forceIPSetsRefresh = true
d.dataplaneNeedsSync = true
case <-routeRefreshC:
log.Debug("Refreshing routes")
d.forceRouteRefresh = true
d.dataplaneNeedsSync = true
case <-xdpRefreshC:
log.Debug("Refreshing XDP")
d.forceXDPRefresh = true
d.dataplaneNeedsSync = true
case <-d.reschedC:
log.Debug("Reschedule kick received")
d.dataplaneNeedsSync = true
// nil out the channel to record that the timer is now inactive.
d.reschedC = nil
case <-throttleC:
d.applyThrottle.Refill()
case <-healthTicks:
d.reportHealth()
case <-retryTicker.C:
case <-d.debugHangC:
log.Warning("Debug hang simulation timer popped, hanging the dataplane!!")
time.Sleep(1 * time.Hour)
log.Panic("Woke up after 1 hour, something's probably wrong with the test.")
}
if datastoreInSync && d.dataplaneNeedsSync {
// Dataplane is out-of-sync, check if we're throttled.
if d.applyThrottle.Admit() {
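// If we had been throttling, only announce recovery once the bucket has capacity to spare beyond this admission.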
if beingThrottled && d.applyThrottle.WouldAdmit() {
log.Info("Dataplane updates no longer throttled")
beingThrottled = false
}
log.Debug("Applying dataplane updates")
applyStart := time.Now()
// Actually apply the changes to the dataplane.
d.apply()
// Record stats.
applyTime := time.Since(applyStart)
summaryApplyTime.Observe(applyTime.Seconds())
if d.dataplaneNeedsSync {
// Dataplane is still dirty, record an error.
countDataplaneSyncErrors.Inc()
}
d.loopSummarizer.EndOfIteration(applyTime)
if !d.doneFirstApply {
log.WithField(
"secsSinceStart", time.Since(processStartTime).Seconds(),
).Info("Completed first update to dataplane.")
d.loopSummarizer.RecordOperation("first-update")
d.doneFirstApply = true
if d.config.PostInSyncCallback != nil {
d.config.PostInSyncCallback()
}
}
d.reportHealth()
} else {
if !beingThrottled {
log.Info("Dataplane updates throttled")
beingThrottled = true
}
}
}
}
}
func (d *InternalDataplane) configureKernel() {
// Attempt to modprobe nf_conntrack_proto_sctp. In some kernels this is a
// module that needs to be loaded, otherwise all SCTP packets are marked
// INVALID by conntrack and dropped by Calico's rules. However, some kernels
// (confirmed in Ubuntu 19.10's build of 5.3.0-24-generic) include this
// conntrack without it being a kernel module, and so modprobe will fail.
// Log result at INFO level for troubleshooting, but otherwise ignore any
// failed modprobe calls.
mp := newModProbe(moduleConntrackSCTP, newRealCmd)
out, err := mp.Exec()
log.WithError(err).WithField("output", out).Infof("attempted to modprobe %s", moduleConntrackSCTP)
log.Info("Making sure IPv4 forwarding is enabled.")
err = writeProcSys("/proc/sys/net/ipv4/ip_forward", "1")
if err != nil {
log.WithError(err).Error("Failed to set IPv4 forwarding sysctl")
}
if d.config.IPv6Enabled {
log.Info("Making sure IPv6 forwarding is enabled.")
err = writeProcSys("/proc/sys/net/ipv6/conf/all/forwarding", "1")
if err != nil {
log.WithError(err).Error("Failed to set IPv6 forwarding sysctl")
}
}
if d.config.BPFEnabled && d.config.BPFDisableUnprivileged {
log.Info("BPF enabled, disabling unprivileged BPF usage.")
err := writeProcSys("/proc/sys/kernel/unprivileged_bpf_disabled", "1")
if err != nil {
log.WithError(err).Error("Failed to set unprivileged_bpf_disabled sysctl")
}
}
if d.config.Wireguard.Enabled {
// wireguard module is available in linux kernel >= 5.6
mpwg := newModProbe(moduleWireguard, newRealCmd)
out, err = mpwg.Exec()
log.WithError(err).WithField("output", out).Infof("attempted to modprobe %s", moduleWireguard)
}
}
func (d *InternalDataplane) recordMsgStat(msg interface{}) {
typeName := reflect.ValueOf(msg).Elem().Type().Name()
countMessages.WithLabelValues(typeName).Inc()
}
func (d *InternalDataplane) apply() {
// Update sequencing is important here because iptables rules have dependencies on ipsets.
// Creating a rule that references an unknown IP set fails, as does deleting an IP set that
// is in use.
// Unset the needs-sync flag, we'll set it again if something fails.
d.dataplaneNeedsSync = false
// First, give the managers a chance to resolve any state based on the preceding batch of
// updates. In some cases, e.g. EndpointManager, this can result in an update to another
// manager (BPFEndpointManager.OnHEPUpdate) that must happen before either of those managers
// begins its dataplane programming updates.
for _, mgr := range d.allManagers {
if handler, ok := mgr.(UpdateBatchResolver); ok {
err := handler.ResolveUpdateBatch()
if err != nil {
log.WithField("manager", reflect.TypeOf(mgr).Name()).WithError(err).Debug(
"couldn't resolve update batch for manager, will try again later")
d.dataplaneNeedsSync = true
}
d.reportHealth()
}
}
// Now allow managers to complete the dataplane programming updates that they need.
for _, mgr := range d.allManagers {
err := mgr.CompleteDeferredWork()
if err != nil {
log.WithField("manager", reflect.TypeOf(mgr).Name()).WithError(err).Debug(
"couldn't complete deferred work for manager, will try again later")
d.dataplaneNeedsSync = true
}
d.reportHealth()
}
if d.xdpState != nil {
if d.forceXDPRefresh {
// Refresh timer popped.
d.xdpState.QueueResync()
d.forceXDPRefresh = false
}
var applyXDPError error
d.xdpState.ProcessPendingDiffState(d.endpointsSourceV4)
if err := d.applyXDPActions(); err != nil {
applyXDPError = err
} else {
err := d.xdpState.ProcessMemberUpdates()
d.xdpState.DropPendingDiffState()
if err != nil {
log.WithError(err).Warning("Failed to process XDP member updates, will resync later...")
if err := d.applyXDPActions(); err != nil {
applyXDPError = err
}
}
d.xdpState.UpdateState()
}
if applyXDPError != nil {
log.WithError(applyXDPError).Info("Applying XDP actions did not succeed, disabling XDP")
if err := d.shutdownXDPCompletely(); err != nil {
log.Warnf("failed to disable XDP: %v, will proceed anyway.", err)
}
}
}
d.reportHealth()
if d.forceRouteRefresh {
// Refresh timer popped.
for _, r := range d.routeTableSyncers() {
// Queue a resync on the next Apply().
r.QueueResync()
}
d.forceRouteRefresh = false
}
if d.forceIPSetsRefresh {
// Refresh timer popped.
for _, r := range d.ipSets {
// Queue a resync on the next Apply().
r.QueueResync()
}
d.forceIPSetsRefresh = false
}
// Next, create/update IP sets. We defer deletions of IP sets until after we update
// iptables.
var ipSetsWG sync.WaitGroup
for _, ipSets := range d.ipSets {
ipSetsWG.Add(1)
go func(ipSets ipsetsDataplane) {
ipSets.ApplyUpdates()
d.reportHealth()
ipSetsWG.Done()
}(ipSets)
}
// Update the routing table in parallel with the other updates. We'll wait for it to finish
// before we return.
var routesWG sync.WaitGroup
for _, r := range d.routeTableSyncers() {
routesWG.Add(1)
go func(r routeTableSyncer) {
err := r.Apply()
if err != nil {
log.Warn("Failed to synchronize routing table, will retry...")
d.dataplaneNeedsSync = true
}
d.reportHealth()
routesWG.Done()
}(r)
}
// Wait for the IP sets update to finish. We can't update iptables until it has.
ipSetsWG.Wait()
// Update iptables, this should sever any references to now-unused IP sets.
var reschedDelayMutex sync.Mutex
var reschedDelay time.Duration
var iptablesWG sync.WaitGroup
for _, t := range d.allIptablesTables {
iptablesWG.Add(1)
go func(t *iptables.Table) {
tableReschedAfter := t.Apply()
reschedDelayMutex.Lock()
defer reschedDelayMutex.Unlock()
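// Track the soonest non-zero reschedule delay requested by any table.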
if tableReschedAfter != 0 && (reschedDelay == 0 || tableReschedAfter < reschedDelay) {
reschedDelay = tableReschedAfter
}
d.reportHealth()
iptablesWG.Done()
}(t)
}
iptablesWG.Wait()
// Now clean up any left-over IP sets.
for _, ipSets := range d.ipSets {
ipSetsWG.Add(1)
go func(s ipsetsDataplane) {
s.ApplyDeletions()
d.reportHealth()
ipSetsWG.Done()
}(ipSets)
}
ipSetsWG.Wait()
// Wait for the route updates to finish.
routesWG.Wait()
// And publish any status updates.
d.endpointStatusCombiner.Apply()
// Set up any needed rescheduling kick.
if d.reschedC != nil {
// We have an active rescheduling timer, stop it so we can restart it with a
// different timeout below if it is still needed.
// This snippet comes from the docs for Timer.Stop().
if !d.reschedTimer.Stop() {
// Timer had already popped, drain its channel.
<-d.reschedC
}
// Nil out our copy of the channel to record that the timer is inactive.
d.reschedC = nil
}
if reschedDelay != 0 {
// We need to reschedule.
log.WithField("delay", reschedDelay).Debug("Asked to reschedule.")
if d.reschedTimer == nil {
// First time, create the timer.
d.reschedTimer = time.NewTimer(reschedDelay)
} else {
// Have an existing timer, reset it.
d.reschedTimer.Reset(reschedDelay)
}
d.reschedC = d.reschedTimer.C
}
}
func (d *InternalDataplane) applyXDPActions() error {
var err error
for i := 0; i < 10; i++ {
err = d.xdpState.ResyncIfNeeded(d.ipsetsSourceV4)
if err != nil {
return err
}
if err = d.xdpState.ApplyBPFActions(d.ipsetsSourceV4); err == nil {
return nil
} else {
log.WithError(err).Info("Applying XDP BPF actions did not succeed, will retry with resync...")
}
}
return err
}
func (d *InternalDataplane) loopReportingStatus() {
log.Info("Started internal status report thread")
if d.config.StatusReportingInterval <= 0 {
log.Info("Process status reports disabled")
return
}
// Wait before first report so that we don't check in if we're in a tight cyclic restart.
time.Sleep(10 * time.Second)
for {
uptimeSecs := time.Since(processStartTime).Seconds()
d.fromDataplane <- &proto.ProcessStatusUpdate{
IsoTimestamp: time.Now().UTC().Format(time.RFC3339),
Uptime: uptimeSecs,
}
time.Sleep(d.config.StatusReportingInterval)
}
}
// iptablesTable is a shim interface for iptables.Table.
type iptablesTable interface {
UpdateChain(chain *iptables.Chain)
UpdateChains([]*iptables.Chain)
RemoveChains([]*iptables.Chain)
RemoveChainByName(name string)
}
func (d *InternalDataplane) reportHealth() {
if d.config.HealthAggregator != nil {
d.config.HealthAggregator.Report(
healthName,
&health.HealthReport{Live: true, Ready: d.doneFirstApply},
)
}
}
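// dummyLock is a no-op sync.Locker for callers that don't require real locking.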
type dummyLock struct{}
func (d dummyLock) Lock() {
}
func (d dummyLock) Unlock() {
}
| 1 | 19,217 | This line should actually be: `if c.KubernetesProvider == config.ProviderAKS && c.Wireguard.EncryptHostTraffic {` because we only need to tweak the MTU like this on AKS. | projectcalico-felix | go |
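For reference, a minimal sketch of the Wireguard defaulting branch with the reviewer's suggested guard applied (`EncryptHostTraffic` is taken verbatim from the suggestion; it is assumed to be a boolean field on the Wireguard config):

if c.Wireguard.MTU == 0 {
    if c.KubernetesProvider == config.ProviderAKS && c.Wireguard.EncryptHostTraffic {
        // Only AKS needs the extra allowance for Azure's 1400-byte fragmentation boundary.
        c.Wireguard.MTU = hostMTU - aksMTUOverhead - wireguardMTUOverhead
    } else {
        c.Wireguard.MTU = hostMTU - wireguardMTUOverhead
    }
}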
@@ -36,8 +36,14 @@ class FastTemporalMemory(TemporalMemory):
"""
def __init__(self, *args, **kwargs):
+ maxSegmentsPerCell = kwargs.get("maxSegmentsPerCell") or 255
+ maxSynapsesPerSegment = kwargs.get("maxSynapsesPerSegment") or 255
+
super(FastTemporalMemory, self).__init__(*args, **kwargs)
- self.connections = Connections(self.numberOfCells())
+ self.connections = Connections(
+ self.numberOfCells(),
+ maxSegmentsPerCell=maxSegmentsPerCell,
+ maxSynapsesPerSegment=maxSynapsesPerSegment)
def burstColumns(self, | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal Memory implementation in Python.
"""
from nupic.research.temporal_memory import TemporalMemory
from nupic.bindings.algorithms import Connections, ConnectionsCell
class FastTemporalMemory(TemporalMemory):
"""
Class implementing the Temporal Memory algorithm.
Uses C++ Connections data structure for optimization.
"""
def __init__(self, *args, **kwargs):
super(FastTemporalMemory, self).__init__(*args, **kwargs)
self.connections = Connections(self.numberOfCells())
def burstColumns(self,
activeColumns,
predictedColumns,
prevActiveCells,
prevWinnerCells,
connections):
"""
Phase 2: Burst unpredicted columns.
Pseudocode:
- for each unpredicted active column
- mark all cells as active
- mark the best matching cell as winner cell
- (learning)
- if it has no matching segment
- (optimization) if there are prev winner cells
- add a segment to it
- mark the segment as learning
@param activeColumns (set) Indices of active columns in `t`
@param predictedColumns (set) Indices of predicted columns in `t`
@param prevActiveCells (set) Indices of active cells in `t-1`
@param prevWinnerCells (set) Indices of winner cells in `t-1`
@param connections (Connections) Connectivity of layer
@return (tuple) Contains:
`activeCells` (set),
`winnerCells` (set),
`learningSegments` (set)
"""
activeCells = set()
winnerCells = set()
learningSegments = set()
unpredictedColumns = activeColumns - predictedColumns
for column in unpredictedColumns:
cells = self.cellsForColumn(column)
activeCells.update(cells)
bestSegment = connections.mostActiveSegmentForCells(
list(cells), list(prevActiveCells), self.minThreshold)
if bestSegment is None:
bestCell = self.leastUsedCell(cells, connections)
if len(prevWinnerCells):
bestSegment = connections.createSegment(bestCell)
else:
# TODO: For some reason, bestSegment.cell is garbage-collected after
# this function returns. So we have to use the below hack. Figure out
# why and clean up.
bestCell = ConnectionsCell(bestSegment.cell.idx)
winnerCells.add(bestCell)
if bestSegment:
learningSegments.add(bestSegment)
return activeCells, winnerCells, learningSegments
def computePredictiveCells(self, activeCells, connections):
"""
Phase 4: Compute predictive cells due to lateral input
on distal dendrites.
Pseudocode:
- for each distal dendrite segment with activity >= activationThreshold
- mark the segment as active
- mark the cell as predictive
- for each distal dendrite segment with unconnected
activity >= minThreshold
- mark the segment as matching
- mark the cell as matching
Forward propagates activity from active cells to the synapses that touch
them, to determine which synapses are active.
@param activeCells (set) Indices of active cells in `t`
@param connections (Connections) Connectivity of layer
@return (tuple) Contains:
`activeSegments` (set),
`predictiveCells` (set),
`matchingSegments` (set),
`matchingCells` (set)
"""
activity = connections.computeActivity(list(activeCells),
self.connectedPermanence,
self.activationThreshold)
activeSegments = set(connections.activeSegments(activity))
predictiveCells = set(connections.activeCells(activity))
if self.predictedSegmentDecrement > 0:
activity = connections.computeActivity(list(activeCells),
0,
self.minThreshold)
matchingSegments = set(connections.activeSegments(activity))
matchingCells = set(connections.activeCells(activity))
else:
matchingSegments = set()
matchingCells = set()
return activeSegments, predictiveCells, matchingSegments, matchingCells
@staticmethod
def getCellIndex(cell):
return cell.idx
# ==============================
# Helper functions
# ==============================
def columnForCell(self, cell):
"""
Returns the index of the column that a cell belongs to.
@param cell (int) Cell index
@return (int) Column index
"""
self._validateCell(cell)
return int(cell.idx / self.cellsPerColumn)
def cellsForColumn(self, column):
"""
Returns the indices of cells that belong to a column.
@param column (int) Column index
@return (set) Cell indices
"""
self._validateColumn(column)
start = self.cellsPerColumn * column
end = start + self.cellsPerColumn
return set([ConnectionsCell(idx) for idx in xrange(start, end)])
def _validateCell(self, cell):
"""
Raises an error if cell index is invalid.
@param cell (int) Cell index
"""
if cell.idx >= self.numberOfCells() or cell.idx < 0:
raise IndexError("Invalid cell")
| 1 | 20,003 | `get` has an optional second parameter that is the default if the key isn't found. | numenta-nupic | py |
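A minimal sketch of the difference (plain Python, hypothetical values): `kwargs.get(key) or default` falls back whenever the stored value is falsy, while the second parameter of `get` only applies when the key is absent:

kwargs = {"maxSegmentsPerCell": 0}  # caller explicitly passed 0

# Pattern from the patch: any falsy value (0, None, "") is replaced.
print(kwargs.get("maxSegmentsPerCell") or 255)   # -> 255, the explicit 0 is lost

# Using get's default parameter: it only kicks in when the key is missing.
print(kwargs.get("maxSegmentsPerCell", 255))     # -> 0, the caller's value survives
print(kwargs.get("maxSynapsesPerSegment", 255))  # -> 255, key absent, default used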
@@ -63,6 +63,7 @@ if (LDAP_HOST && $ldap_auth) {
if (!$passwd_hash) {
echo "<account_out>\n";
echo " <success/>\n";
+ echo "<id>$user->id</id>\n";
echo "</account_out>\n";
exit();
} | 1 | <?php
// This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2008 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
// RPC handler for account lookup
require_once("../inc/boinc_db.inc");
require_once("../inc/util.inc");
require_once("../inc/email.inc");
require_once("../inc/xml.inc");
require_once("../inc/ldap.inc");
xml_header();
$retval = db_init_xml();
if ($retval) xml_error($retval);
$ldap_auth = get_str("ldap_auth", true);
if (LDAP_HOST && $ldap_auth) {
// LDAP case.
//
$ldap_uid = get_str("ldap_uid");
$passwd = get_str("passwd");
list ($ldap_user, $error_msg) = ldap_auth($ldap_uid, $passwd);
if ($error_msg) {
sleep(LOGIN_FAIL_SLEEP_SEC);
xml_error(ERR_BAD_USER_NAME, $error_msg);
}
$x = ldap_email_string($ldap_uid);
$user = BoincUser::lookup_email_addr($x);
if (!$user) {
$user = make_user_ldap($x, $ldap_user->name);
if (!$user) {
xml_error(-1, "user record creation failed");
}
}
} else {
// normal (non-LDAP) case
$email_addr = get_str("email_addr");
$passwd_hash = get_str("passwd_hash", true);
$email_addr = BoincDb::escape_string($email_addr);
$user = BoincUser::lookup("email_addr='$email_addr'");
if (!$user) {
sleep(LOGIN_FAIL_SLEEP_SEC);
xml_error(ERR_DB_NOT_FOUND);
}
if (!$passwd_hash) {
echo "<account_out>\n";
echo " <success/>\n";
echo "</account_out>\n";
exit();
}
$auth_hash = md5($user->authenticator.$user->email_addr);
// if no password set, set password to account key
//
if (!strlen($user->passwd_hash)) {
$user->passwd_hash = $auth_hash;
$user->update("passwd_hash='$user->passwd_hash'");
}
// if the given password hash matches (auth+email), accept it
//
if ($user->passwd_hash != $passwd_hash && $auth_hash != $passwd_hash) {
sleep(LOGIN_FAIL_SLEEP_SEC);
xml_error(ERR_BAD_PASSWD);
}
}
echo "<account_out>\n";
echo "<authenticator>$user->authenticator</authenticator>\n";
echo "</account_out>\n";
?>
| 1 | 8,938 | Please also indent the new response line as was done to the existing line above. | BOINC-boinc | php |
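That is, the added line would carry the same leading space inside the string as the `<success/>` line above it:

echo " <id>$user->id</id>\n";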
@@ -3403,6 +3403,13 @@ void Client::Handle_OP_AutoFire(const EQApplicationPacket *app)
DumpPacket(app);
return;
}
+
+ if (this->GetTarget() == this) {
+ this->MessageString(Chat::TooFarAway, TRY_ATTACKING_SOMEONE);
+ auto_fire = false;
+ return;
+ }
+
bool *af = (bool*)app->pBuffer;
auto_fire = *af;
auto_attack = false; | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2016 EQEMu Development Team (http://eqemulator.net)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "../common/global_define.h"
#include "../common/eqemu_logsys.h"
#include "../common/opcodemgr.h"
#include <iomanip>
#include <iostream>
#include <math.h>
#include <set>
#include <stdio.h>
#include <string.h>
#include <zlib.h>
#ifdef _WINDOWS
#define snprintf _snprintf
#define strncasecmp _strnicmp
#define strcasecmp _stricmp
#else
#include <pthread.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
#endif
#include "../common/crc32.h"
#include "../common/data_verification.h"
#include "../common/faction.h"
#include "../common/guilds.h"
#include "../common/rdtsc.h"
#include "../common/rulesys.h"
#include "../common/skills.h"
#include "../common/spdat.h"
#include "../common/string_util.h"
#include "../common/zone_numbers.h"
#include "data_bucket.h"
#include "event_codes.h"
#include "expedition.h"
#include "expedition_database.h"
#include "guild_mgr.h"
#include "merc.h"
#include "petitions.h"
#include "pets.h"
#include "queryserv.h"
#include "quest_parser_collection.h"
#include "string_ids.h"
#include "titles.h"
#include "water_map.h"
#include "worldserver.h"
#include "zone.h"
#include "mob_movement_manager.h"
#include "../common/repositories/character_instance_safereturns_repository.h"
#include "../common/repositories/criteria/content_filter_criteria.h"
#include "../common/shared_tasks.h"
#include "gm_commands/door_manipulation.h"
#ifdef BOTS
#include "bot.h"
#endif
extern QueryServ* QServ;
extern Zone* zone;
extern volatile bool is_zone_loaded;
extern WorldServer worldserver;
extern PetitionList petition_list;
extern EntityList entity_list;
typedef void (Client::*ClientPacketProc)(const EQApplicationPacket *app);
//Use a map for connecting opcodes since it doesn't get used a lot and is sparse
std::map<uint32, ClientPacketProc> ConnectingOpcodes;
//Use a static array for connected, for speed
ClientPacketProc ConnectedOpcodes[_maxEmuOpcode];
void MapOpcodes()
{
ConnectingOpcodes.clear();
memset(ConnectedOpcodes, 0, sizeof(ConnectedOpcodes));
// Now put all the opcodes into their home...
// connecting opcode handler assignments:
ConnectingOpcodes[OP_ApproveZone] = &Client::Handle_Connect_OP_ApproveZone;
ConnectingOpcodes[OP_BlockedBuffs] = &Client::Handle_OP_BlockedBuffs;
ConnectingOpcodes[OP_ClientError] = &Client::Handle_Connect_OP_ClientError;
ConnectingOpcodes[OP_ClientReady] = &Client::Handle_Connect_OP_ClientReady;
ConnectingOpcodes[OP_ClientUpdate] = &Client::Handle_Connect_OP_ClientUpdate;
ConnectingOpcodes[OP_GetGuildsList] = &Client::Handle_OP_GetGuildsList; // temporary hack
ConnectingOpcodes[OP_ReqClientSpawn] = &Client::Handle_Connect_OP_ReqClientSpawn;
ConnectingOpcodes[OP_ReqNewZone] = &Client::Handle_Connect_OP_ReqNewZone;
ConnectingOpcodes[OP_SendAAStats] = &Client::Handle_Connect_OP_SendAAStats;
ConnectingOpcodes[OP_SendAATable] = &Client::Handle_Connect_OP_SendAATable;
ConnectingOpcodes[OP_SendExpZonein] = &Client::Handle_Connect_OP_SendExpZonein;
ConnectingOpcodes[OP_SendGuildTributes] = &Client::Handle_Connect_OP_SendGuildTributes;
ConnectingOpcodes[OP_SendGuildTributes] = &Client::Handle_Connect_OP_SendGuildTributes; // I guess it didn't believe us with the first assignment?
ConnectingOpcodes[OP_SendTributes] = &Client::Handle_Connect_OP_SendTributes;
ConnectingOpcodes[OP_SetServerFilter] = &Client::Handle_Connect_OP_SetServerFilter;
ConnectingOpcodes[OP_SpawnAppearance] = &Client::Handle_Connect_OP_SpawnAppearance;
ConnectingOpcodes[OP_TGB] = &Client::Handle_Connect_OP_TGB;
ConnectingOpcodes[OP_UpdateAA] = &Client::Handle_Connect_OP_UpdateAA;
ConnectingOpcodes[OP_WearChange] = &Client::Handle_Connect_OP_WearChange;
ConnectingOpcodes[OP_WorldObjectsSent] = &Client::Handle_Connect_OP_WorldObjectsSent;
ConnectingOpcodes[OP_XTargetAutoAddHaters] = &Client::Handle_OP_XTargetAutoAddHaters;
ConnectingOpcodes[OP_XTargetRequest] = &Client::Handle_OP_XTargetRequest;
ConnectingOpcodes[OP_ZoneComplete] = &Client::Handle_Connect_OP_ZoneComplete;
ConnectingOpcodes[OP_ZoneEntry] = &Client::Handle_Connect_OP_ZoneEntry;
// connected opcode handler assignments:
ConnectedOpcodes[OP_0x0193] = &Client::Handle_0x0193;
ConnectedOpcodes[OP_AAAction] = &Client::Handle_OP_AAAction;
ConnectedOpcodes[OP_AcceptNewTask] = &Client::Handle_OP_AcceptNewTask;
ConnectedOpcodes[OP_AdventureInfoRequest] = &Client::Handle_OP_AdventureInfoRequest;
ConnectedOpcodes[OP_AdventureLeaderboardRequest] = &Client::Handle_OP_AdventureLeaderboardRequest;
ConnectedOpcodes[OP_AdventureMerchantPurchase] = &Client::Handle_OP_AdventureMerchantPurchase;
ConnectedOpcodes[OP_AdventureMerchantRequest] = &Client::Handle_OP_AdventureMerchantRequest;
ConnectedOpcodes[OP_AdventureMerchantSell] = &Client::Handle_OP_AdventureMerchantSell;
ConnectedOpcodes[OP_AdventureRequest] = &Client::Handle_OP_AdventureRequest;
ConnectedOpcodes[OP_AdventureStatsRequest] = &Client::Handle_OP_AdventureStatsRequest;
ConnectedOpcodes[OP_AggroMeterLockTarget] = &Client::Handle_OP_AggroMeterLockTarget;
ConnectedOpcodes[OP_AltCurrencyMerchantRequest] = &Client::Handle_OP_AltCurrencyMerchantRequest;
ConnectedOpcodes[OP_AltCurrencyPurchase] = &Client::Handle_OP_AltCurrencyPurchase;
ConnectedOpcodes[OP_AltCurrencyReclaim] = &Client::Handle_OP_AltCurrencyReclaim;
ConnectedOpcodes[OP_AltCurrencySell] = &Client::Handle_OP_AltCurrencySell;
ConnectedOpcodes[OP_AltCurrencySellSelection] = &Client::Handle_OP_AltCurrencySellSelection;
ConnectedOpcodes[OP_Animation] = &Client::Handle_OP_Animation;
ConnectedOpcodes[OP_ApplyPoison] = &Client::Handle_OP_ApplyPoison;
ConnectedOpcodes[OP_Assist] = &Client::Handle_OP_Assist;
ConnectedOpcodes[OP_AssistGroup] = &Client::Handle_OP_AssistGroup;
ConnectedOpcodes[OP_AugmentInfo] = &Client::Handle_OP_AugmentInfo;
ConnectedOpcodes[OP_AugmentItem] = &Client::Handle_OP_AugmentItem;
ConnectedOpcodes[OP_AutoAttack] = &Client::Handle_OP_AutoAttack;
ConnectedOpcodes[OP_AutoAttack2] = &Client::Handle_OP_AutoAttack2;
ConnectedOpcodes[OP_AutoFire] = &Client::Handle_OP_AutoFire;
ConnectedOpcodes[OP_Bandolier] = &Client::Handle_OP_Bandolier;
ConnectedOpcodes[OP_BankerChange] = &Client::Handle_OP_BankerChange;
ConnectedOpcodes[OP_Barter] = &Client::Handle_OP_Barter;
ConnectedOpcodes[OP_BazaarInspect] = &Client::Handle_OP_BazaarInspect;
ConnectedOpcodes[OP_BazaarSearch] = &Client::Handle_OP_BazaarSearch;
ConnectedOpcodes[OP_Begging] = &Client::Handle_OP_Begging;
ConnectedOpcodes[OP_Bind_Wound] = &Client::Handle_OP_Bind_Wound;
ConnectedOpcodes[OP_BlockedBuffs] = &Client::Handle_OP_BlockedBuffs;
ConnectedOpcodes[OP_BoardBoat] = &Client::Handle_OP_BoardBoat;
ConnectedOpcodes[OP_Buff] = &Client::Handle_OP_Buff;
ConnectedOpcodes[OP_BuffRemoveRequest] = &Client::Handle_OP_BuffRemoveRequest;
ConnectedOpcodes[OP_Bug] = &Client::Handle_OP_Bug;
ConnectedOpcodes[OP_Camp] = &Client::Handle_OP_Camp;
ConnectedOpcodes[OP_CancelTask] = &Client::Handle_OP_CancelTask;
ConnectedOpcodes[OP_CancelTrade] = &Client::Handle_OP_CancelTrade;
ConnectedOpcodes[OP_CastSpell] = &Client::Handle_OP_CastSpell;
ConnectedOpcodes[OP_ChannelMessage] = &Client::Handle_OP_ChannelMessage;
ConnectedOpcodes[OP_ClearBlockedBuffs] = &Client::Handle_OP_ClearBlockedBuffs;
ConnectedOpcodes[OP_ClearNPCMarks] = &Client::Handle_OP_ClearNPCMarks;
ConnectedOpcodes[OP_ClearSurname] = &Client::Handle_OP_ClearSurname;
ConnectedOpcodes[OP_ClickDoor] = &Client::Handle_OP_ClickDoor;
ConnectedOpcodes[OP_ClickObject] = &Client::Handle_OP_ClickObject;
ConnectedOpcodes[OP_ClickObjectAction] = &Client::Handle_OP_ClickObjectAction;
ConnectedOpcodes[OP_ClientError] = &Client::Handle_OP_ClientError;
ConnectedOpcodes[OP_ClientTimeStamp] = &Client::Handle_OP_ClientTimeStamp;
ConnectedOpcodes[OP_ClientUpdate] = &Client::Handle_OP_ClientUpdate;
ConnectedOpcodes[OP_CombatAbility] = &Client::Handle_OP_CombatAbility;
ConnectedOpcodes[OP_ConfirmDelete] = &Client::Handle_OP_ConfirmDelete;
ConnectedOpcodes[OP_Consent] = &Client::Handle_OP_Consent;
ConnectedOpcodes[OP_ConsentDeny] = &Client::Handle_OP_ConsentDeny;
ConnectedOpcodes[OP_Consider] = &Client::Handle_OP_Consider;
ConnectedOpcodes[OP_ConsiderCorpse] = &Client::Handle_OP_ConsiderCorpse;
ConnectedOpcodes[OP_Consume] = &Client::Handle_OP_Consume;
ConnectedOpcodes[OP_ControlBoat] = &Client::Handle_OP_ControlBoat;
ConnectedOpcodes[OP_CorpseDrag] = &Client::Handle_OP_CorpseDrag;
ConnectedOpcodes[OP_CorpseDrop] = &Client::Handle_OP_CorpseDrop;
ConnectedOpcodes[OP_CrashDump] = &Client::Handle_OP_CrashDump;
ConnectedOpcodes[OP_CrystalCreate] = &Client::Handle_OP_CrystalCreate;
ConnectedOpcodes[OP_CrystalReclaim] = &Client::Handle_OP_CrystalReclaim;
ConnectedOpcodes[OP_Damage] = &Client::Handle_OP_Damage;
ConnectedOpcodes[OP_Death] = &Client::Handle_OP_Death;
ConnectedOpcodes[OP_DelegateAbility] = &Client::Handle_OP_DelegateAbility;
ConnectedOpcodes[OP_DeleteItem] = &Client::Handle_OP_DeleteItem;
ConnectedOpcodes[OP_DeleteSpawn] = &Client::Handle_OP_DeleteSpawn;
ConnectedOpcodes[OP_DeleteSpell] = &Client::Handle_OP_DeleteSpell;
ConnectedOpcodes[OP_Disarm] = &Client::Handle_OP_Disarm;
ConnectedOpcodes[OP_DisarmTraps] = &Client::Handle_OP_DisarmTraps;
ConnectedOpcodes[OP_DoGroupLeadershipAbility] = &Client::Handle_OP_DoGroupLeadershipAbility;
ConnectedOpcodes[OP_DuelResponse] = &Client::Handle_OP_DuelResponse;
ConnectedOpcodes[OP_DuelResponse2] = &Client::Handle_OP_DuelResponse2;
ConnectedOpcodes[OP_DumpName] = &Client::Handle_OP_DumpName;
ConnectedOpcodes[OP_Dye] = &Client::Handle_OP_Dye;
ConnectedOpcodes[OP_DzAddPlayer] = &Client::Handle_OP_DzAddPlayer;
ConnectedOpcodes[OP_DzChooseZoneReply] = &Client::Handle_OP_DzChooseZoneReply;
ConnectedOpcodes[OP_DzExpeditionInviteResponse] = &Client::Handle_OP_DzExpeditionInviteResponse;
ConnectedOpcodes[OP_DzListTimers] = &Client::Handle_OP_DzListTimers;
ConnectedOpcodes[OP_DzMakeLeader] = &Client::Handle_OP_DzMakeLeader;
ConnectedOpcodes[OP_DzPlayerList] = &Client::Handle_OP_DzPlayerList;
ConnectedOpcodes[OP_DzRemovePlayer] = &Client::Handle_OP_DzRemovePlayer;
ConnectedOpcodes[OP_DzSwapPlayer] = &Client::Handle_OP_DzSwapPlayer;
ConnectedOpcodes[OP_DzQuit] = &Client::Handle_OP_DzQuit;
ConnectedOpcodes[OP_Emote] = &Client::Handle_OP_Emote;
ConnectedOpcodes[OP_EndLootRequest] = &Client::Handle_OP_EndLootRequest;
ConnectedOpcodes[OP_EnvDamage] = &Client::Handle_OP_EnvDamage;
ConnectedOpcodes[OP_FaceChange] = &Client::Handle_OP_FaceChange;
ConnectedOpcodes[OP_FeignDeath] = &Client::Handle_OP_FeignDeath;
ConnectedOpcodes[OP_FindPersonRequest] = &Client::Handle_OP_FindPersonRequest;
ConnectedOpcodes[OP_Fishing] = &Client::Handle_OP_Fishing;
ConnectedOpcodes[OP_FloatListThing] = &Client::Handle_OP_MovementHistoryList;
ConnectedOpcodes[OP_Forage] = &Client::Handle_OP_Forage;
ConnectedOpcodes[OP_FriendsWho] = &Client::Handle_OP_FriendsWho;
ConnectedOpcodes[OP_GetGuildMOTD] = &Client::Handle_OP_GetGuildMOTD;
ConnectedOpcodes[OP_GetGuildsList] = &Client::Handle_OP_GetGuildsList;
ConnectedOpcodes[OP_GMBecomeNPC] = &Client::Handle_OP_GMBecomeNPC;
ConnectedOpcodes[OP_GMDelCorpse] = &Client::Handle_OP_GMDelCorpse;
ConnectedOpcodes[OP_GMEmoteZone] = &Client::Handle_OP_GMEmoteZone;
ConnectedOpcodes[OP_GMEndTraining] = &Client::Handle_OP_GMEndTraining;
ConnectedOpcodes[OP_GMFind] = &Client::Handle_OP_GMFind;
ConnectedOpcodes[OP_GMGoto] = &Client::Handle_OP_GMGoto;
ConnectedOpcodes[OP_GMHideMe] = &Client::Handle_OP_GMHideMe;
ConnectedOpcodes[OP_GMKick] = &Client::Handle_OP_GMKick;
ConnectedOpcodes[OP_GMKill] = &Client::Handle_OP_GMKill;
ConnectedOpcodes[OP_GMLastName] = &Client::Handle_OP_GMLastName;
ConnectedOpcodes[OP_GMNameChange] = &Client::Handle_OP_GMNameChange;
ConnectedOpcodes[OP_GMSearchCorpse] = &Client::Handle_OP_GMSearchCorpse;
ConnectedOpcodes[OP_GMServers] = &Client::Handle_OP_GMServers;
ConnectedOpcodes[OP_GMSummon] = &Client::Handle_OP_GMSummon;
ConnectedOpcodes[OP_GMToggle] = &Client::Handle_OP_GMToggle;
ConnectedOpcodes[OP_GMTraining] = &Client::Handle_OP_GMTraining;
ConnectedOpcodes[OP_GMTrainSkill] = &Client::Handle_OP_GMTrainSkill;
ConnectedOpcodes[OP_GMZoneRequest] = &Client::Handle_OP_GMZoneRequest;
ConnectedOpcodes[OP_GMZoneRequest2] = &Client::Handle_OP_GMZoneRequest2;
ConnectedOpcodes[OP_GroundSpawn] = &Client::Handle_OP_CreateObject;
ConnectedOpcodes[OP_GroupAcknowledge] = &Client::Handle_OP_GroupAcknowledge;
ConnectedOpcodes[OP_GroupCancelInvite] = &Client::Handle_OP_GroupCancelInvite;
ConnectedOpcodes[OP_GroupDelete] = &Client::Handle_OP_GroupDelete;
ConnectedOpcodes[OP_GroupDisband] = &Client::Handle_OP_GroupDisband;
ConnectedOpcodes[OP_GroupFollow] = &Client::Handle_OP_GroupFollow;
ConnectedOpcodes[OP_GroupFollow2] = &Client::Handle_OP_GroupFollow2;
ConnectedOpcodes[OP_GroupInvite] = &Client::Handle_OP_GroupInvite;
ConnectedOpcodes[OP_GroupInvite2] = &Client::Handle_OP_GroupInvite2;
ConnectedOpcodes[OP_GroupMakeLeader] = &Client::Handle_OP_GroupMakeLeader;
ConnectedOpcodes[OP_GroupMentor] = &Client::Handle_OP_GroupMentor;
ConnectedOpcodes[OP_GroupRoles] = &Client::Handle_OP_GroupRoles;
ConnectedOpcodes[OP_GroupUpdate] = &Client::Handle_OP_GroupUpdate;
ConnectedOpcodes[OP_GuildBank] = &Client::Handle_OP_GuildBank;
ConnectedOpcodes[OP_GuildCreate] = &Client::Handle_OP_GuildCreate;
ConnectedOpcodes[OP_GuildDelete] = &Client::Handle_OP_GuildDelete;
ConnectedOpcodes[OP_GuildDemote] = &Client::Handle_OP_GuildDemote;
ConnectedOpcodes[OP_GuildInvite] = &Client::Handle_OP_GuildInvite;
ConnectedOpcodes[OP_GuildInviteAccept] = &Client::Handle_OP_GuildInviteAccept;
ConnectedOpcodes[OP_GuildLeader] = &Client::Handle_OP_GuildLeader;
ConnectedOpcodes[OP_GuildManageBanker] = &Client::Handle_OP_GuildManageBanker;
ConnectedOpcodes[OP_GuildPeace] = &Client::Handle_OP_GuildPeace;
ConnectedOpcodes[OP_GuildPromote] = &Client::Handle_OP_GuildPromote;
ConnectedOpcodes[OP_GuildPublicNote] = &Client::Handle_OP_GuildPublicNote;
ConnectedOpcodes[OP_GuildRemove] = &Client::Handle_OP_GuildRemove;
ConnectedOpcodes[OP_GuildStatus] = &Client::Handle_OP_GuildStatus;
ConnectedOpcodes[OP_GuildUpdateURLAndChannel] = &Client::Handle_OP_GuildUpdateURLAndChannel;
ConnectedOpcodes[OP_GuildWar] = &Client::Handle_OP_GuildWar;
ConnectedOpcodes[OP_Heartbeat] = &Client::Handle_OP_Heartbeat;
ConnectedOpcodes[OP_Hide] = &Client::Handle_OP_Hide;
ConnectedOpcodes[OP_HideCorpse] = &Client::Handle_OP_HideCorpse;
ConnectedOpcodes[OP_Illusion] = &Client::Handle_OP_Illusion;
ConnectedOpcodes[OP_InspectAnswer] = &Client::Handle_OP_InspectAnswer;
ConnectedOpcodes[OP_InspectMessageUpdate] = &Client::Handle_OP_InspectMessageUpdate;
ConnectedOpcodes[OP_InspectRequest] = &Client::Handle_OP_InspectRequest;
ConnectedOpcodes[OP_InstillDoubt] = &Client::Handle_OP_InstillDoubt;
ConnectedOpcodes[OP_ItemLinkClick] = &Client::Handle_OP_ItemLinkClick;
ConnectedOpcodes[OP_ItemLinkResponse] = &Client::Handle_OP_ItemLinkResponse;
ConnectedOpcodes[OP_ItemName] = &Client::Handle_OP_ItemName;
ConnectedOpcodes[OP_ItemPreview] = &Client::Handle_OP_ItemPreview;
ConnectedOpcodes[OP_ItemVerifyRequest] = &Client::Handle_OP_ItemVerifyRequest;
ConnectedOpcodes[OP_ItemViewUnknown] = &Client::Handle_OP_Ignore;
ConnectedOpcodes[OP_Jump] = &Client::Handle_OP_Jump;
ConnectedOpcodes[OP_KeyRing] = &Client::Handle_OP_KeyRing;
ConnectedOpcodes[OP_KickPlayers] = &Client::Handle_OP_KickPlayers;
ConnectedOpcodes[OP_LDoNButton] = &Client::Handle_OP_LDoNButton;
ConnectedOpcodes[OP_LDoNDisarmTraps] = &Client::Handle_OP_LDoNDisarmTraps;
ConnectedOpcodes[OP_LDoNInspect] = &Client::Handle_OP_LDoNInspect;
ConnectedOpcodes[OP_LDoNOpen] = &Client::Handle_OP_LDoNOpen;
ConnectedOpcodes[OP_LDoNPickLock] = &Client::Handle_OP_LDoNPickLock;
ConnectedOpcodes[OP_LDoNSenseTraps] = &Client::Handle_OP_LDoNSenseTraps;
ConnectedOpcodes[OP_LeadershipExpToggle] = &Client::Handle_OP_LeadershipExpToggle;
ConnectedOpcodes[OP_LeaveAdventure] = &Client::Handle_OP_LeaveAdventure;
ConnectedOpcodes[OP_LeaveBoat] = &Client::Handle_OP_LeaveBoat;
ConnectedOpcodes[OP_LFGCommand] = &Client::Handle_OP_LFGCommand;
ConnectedOpcodes[OP_LFGGetMatchesRequest] = &Client::Handle_OP_LFGGetMatchesRequest;
ConnectedOpcodes[OP_LFGuild] = &Client::Handle_OP_LFGuild;
ConnectedOpcodes[OP_LFPCommand] = &Client::Handle_OP_LFPCommand;
ConnectedOpcodes[OP_LFPGetMatchesRequest] = &Client::Handle_OP_LFPGetMatchesRequest;
ConnectedOpcodes[OP_LoadSpellSet] = &Client::Handle_OP_LoadSpellSet;
ConnectedOpcodes[OP_Logout] = &Client::Handle_OP_Logout;
ConnectedOpcodes[OP_LootItem] = &Client::Handle_OP_LootItem;
ConnectedOpcodes[OP_LootRequest] = &Client::Handle_OP_LootRequest;
ConnectedOpcodes[OP_ManaChange] = &Client::Handle_OP_ManaChange;
ConnectedOpcodes[OP_MemorizeSpell] = &Client::Handle_OP_MemorizeSpell;
ConnectedOpcodes[OP_Mend] = &Client::Handle_OP_Mend;
ConnectedOpcodes[OP_MercenaryCommand] = &Client::Handle_OP_MercenaryCommand;
ConnectedOpcodes[OP_MercenaryDataRequest] = &Client::Handle_OP_MercenaryDataRequest;
ConnectedOpcodes[OP_MercenaryDataUpdateRequest] = &Client::Handle_OP_MercenaryDataUpdateRequest;
ConnectedOpcodes[OP_MercenaryDismiss] = &Client::Handle_OP_MercenaryDismiss;
ConnectedOpcodes[OP_MercenaryHire] = &Client::Handle_OP_MercenaryHire;
ConnectedOpcodes[OP_MercenarySuspendRequest] = &Client::Handle_OP_MercenarySuspendRequest;
ConnectedOpcodes[OP_MercenaryTimerRequest] = &Client::Handle_OP_MercenaryTimerRequest;
ConnectedOpcodes[OP_MoveCoin] = &Client::Handle_OP_MoveCoin;
ConnectedOpcodes[OP_MoveItem] = &Client::Handle_OP_MoveItem;
ConnectedOpcodes[OP_MoveMultipleItems] = &Client::Handle_OP_MoveMultipleItems;
ConnectedOpcodes[OP_OpenContainer] = &Client::Handle_OP_OpenContainer;
ConnectedOpcodes[OP_OpenGuildTributeMaster] = &Client::Handle_OP_OpenGuildTributeMaster;
ConnectedOpcodes[OP_OpenInventory] = &Client::Handle_OP_OpenInventory;
ConnectedOpcodes[OP_OpenTributeMaster] = &Client::Handle_OP_OpenTributeMaster;
ConnectedOpcodes[OP_PDeletePetition] = &Client::Handle_OP_PDeletePetition;
ConnectedOpcodes[OP_PetCommands] = &Client::Handle_OP_PetCommands;
ConnectedOpcodes[OP_Petition] = &Client::Handle_OP_Petition;
ConnectedOpcodes[OP_PetitionBug] = &Client::Handle_OP_PetitionBug;
ConnectedOpcodes[OP_PetitionCheckIn] = &Client::Handle_OP_PetitionCheckIn;
ConnectedOpcodes[OP_PetitionCheckout] = &Client::Handle_OP_PetitionCheckout;
ConnectedOpcodes[OP_PetitionDelete] = &Client::Handle_OP_PetitionDelete;
ConnectedOpcodes[OP_PetitionQue] = &Client::Handle_OP_PetitionQue;
ConnectedOpcodes[OP_PetitionRefresh] = &Client::Handle_OP_PetitionRefresh;
ConnectedOpcodes[OP_PetitionResolve] = &Client::Handle_OP_PetitionResolve;
ConnectedOpcodes[OP_PetitionUnCheckout] = &Client::Handle_OP_PetitionUnCheckout;
ConnectedOpcodes[OP_PlayerStateAdd] = &Client::Handle_OP_PlayerStateAdd;
ConnectedOpcodes[OP_PlayerStateRemove] = &Client::Handle_OP_PlayerStateRemove;
ConnectedOpcodes[OP_PickPocket] = &Client::Handle_OP_PickPocket;
ConnectedOpcodes[OP_PopupResponse] = &Client::Handle_OP_PopupResponse;
ConnectedOpcodes[OP_PotionBelt] = &Client::Handle_OP_PotionBelt;
ConnectedOpcodes[OP_PurchaseLeadershipAA] = &Client::Handle_OP_PurchaseLeadershipAA;
ConnectedOpcodes[OP_PVPLeaderBoardDetailsRequest] = &Client::Handle_OP_PVPLeaderBoardDetailsRequest;
ConnectedOpcodes[OP_PVPLeaderBoardRequest] = &Client::Handle_OP_PVPLeaderBoardRequest;
ConnectedOpcodes[OP_QueryUCSServerStatus] = &Client::Handle_OP_QueryUCSServerStatus;
ConnectedOpcodes[OP_RaidInvite] = &Client::Handle_OP_RaidCommand;
ConnectedOpcodes[OP_RandomReq] = &Client::Handle_OP_RandomReq;
ConnectedOpcodes[OP_ReadBook] = &Client::Handle_OP_ReadBook;
ConnectedOpcodes[OP_RecipeAutoCombine] = &Client::Handle_OP_RecipeAutoCombine;
ConnectedOpcodes[OP_RecipeDetails] = &Client::Handle_OP_RecipeDetails;
ConnectedOpcodes[OP_RecipesFavorite] = &Client::Handle_OP_RecipesFavorite;
ConnectedOpcodes[OP_RecipesSearch] = &Client::Handle_OP_RecipesSearch;
ConnectedOpcodes[OP_ReloadUI] = &Client::Handle_OP_ReloadUI;
ConnectedOpcodes[OP_RemoveBlockedBuffs] = &Client::Handle_OP_RemoveBlockedBuffs;
ConnectedOpcodes[OP_RemoveTrap] = &Client::Handle_OP_RemoveTrap;
ConnectedOpcodes[OP_Report] = &Client::Handle_OP_Report;
ConnectedOpcodes[OP_RequestDuel] = &Client::Handle_OP_RequestDuel;
ConnectedOpcodes[OP_RequestTitles] = &Client::Handle_OP_RequestTitles;
ConnectedOpcodes[OP_RespawnWindow] = &Client::Handle_OP_RespawnWindow;
ConnectedOpcodes[OP_Rewind] = &Client::Handle_OP_Rewind;
ConnectedOpcodes[OP_RezzAnswer] = &Client::Handle_OP_RezzAnswer;
ConnectedOpcodes[OP_Sacrifice] = &Client::Handle_OP_Sacrifice;
ConnectedOpcodes[OP_SafeFallSuccess] = &Client::Handle_OP_SafeFallSuccess;
ConnectedOpcodes[OP_SafePoint] = &Client::Handle_OP_SafePoint;
ConnectedOpcodes[OP_Save] = &Client::Handle_OP_Save;
ConnectedOpcodes[OP_SaveOnZoneReq] = &Client::Handle_OP_SaveOnZoneReq;
ConnectedOpcodes[OP_SelectTribute] = &Client::Handle_OP_SelectTribute;
// Use or Ignore sense heading based on rule.
bool train = RuleB(Skills, TrainSenseHeading);
ConnectedOpcodes[OP_SenseHeading] =
(train) ? &Client::Handle_OP_SenseHeading : &Client::Handle_OP_Ignore;
ConnectedOpcodes[OP_SenseTraps] = &Client::Handle_OP_SenseTraps;
ConnectedOpcodes[OP_SetGuildMOTD] = &Client::Handle_OP_SetGuildMOTD;
ConnectedOpcodes[OP_SetRunMode] = &Client::Handle_OP_SetRunMode;
ConnectedOpcodes[OP_SetServerFilter] = &Client::Handle_OP_SetServerFilter;
ConnectedOpcodes[OP_SetStartCity] = &Client::Handle_OP_SetStartCity;
ConnectedOpcodes[OP_SetTitle] = &Client::Handle_OP_SetTitle;
ConnectedOpcodes[OP_Shielding] = &Client::Handle_OP_Shielding;
ConnectedOpcodes[OP_ShopEnd] = &Client::Handle_OP_ShopEnd;
ConnectedOpcodes[OP_ShopPlayerBuy] = &Client::Handle_OP_ShopPlayerBuy;
ConnectedOpcodes[OP_ShopPlayerSell] = &Client::Handle_OP_ShopPlayerSell;
ConnectedOpcodes[OP_ShopRequest] = &Client::Handle_OP_ShopRequest;
ConnectedOpcodes[OP_Sneak] = &Client::Handle_OP_Sneak;
ConnectedOpcodes[OP_SpawnAppearance] = &Client::Handle_OP_SpawnAppearance;
ConnectedOpcodes[OP_Split] = &Client::Handle_OP_Split;
ConnectedOpcodes[OP_Surname] = &Client::Handle_OP_Surname;
ConnectedOpcodes[OP_SwapSpell] = &Client::Handle_OP_SwapSpell;
ConnectedOpcodes[OP_TargetCommand] = &Client::Handle_OP_TargetCommand;
ConnectedOpcodes[OP_TargetMouse] = &Client::Handle_OP_TargetMouse;
ConnectedOpcodes[OP_TaskHistoryRequest] = &Client::Handle_OP_TaskHistoryRequest;
ConnectedOpcodes[OP_TaskTimers] = &Client::Handle_OP_TaskTimers;
ConnectedOpcodes[OP_Taunt] = &Client::Handle_OP_Taunt;
ConnectedOpcodes[OP_TestBuff] = &Client::Handle_OP_TestBuff;
ConnectedOpcodes[OP_TGB] = &Client::Handle_OP_TGB;
ConnectedOpcodes[OP_Track] = &Client::Handle_OP_Track;
ConnectedOpcodes[OP_TrackTarget] = &Client::Handle_OP_TrackTarget;
ConnectedOpcodes[OP_TrackUnknown] = &Client::Handle_OP_TrackUnknown;
ConnectedOpcodes[OP_TradeAcceptClick] = &Client::Handle_OP_TradeAcceptClick;
ConnectedOpcodes[OP_TradeBusy] = &Client::Handle_OP_TradeBusy;
ConnectedOpcodes[OP_Trader] = &Client::Handle_OP_Trader;
ConnectedOpcodes[OP_TraderBuy] = &Client::Handle_OP_TraderBuy;
ConnectedOpcodes[OP_TradeRequest] = &Client::Handle_OP_TradeRequest;
ConnectedOpcodes[OP_TradeRequestAck] = &Client::Handle_OP_TradeRequestAck;
ConnectedOpcodes[OP_TraderShop] = &Client::Handle_OP_TraderShop;
ConnectedOpcodes[OP_TradeSkillCombine] = &Client::Handle_OP_TradeSkillCombine;
ConnectedOpcodes[OP_Translocate] = &Client::Handle_OP_Translocate;
ConnectedOpcodes[OP_TributeItem] = &Client::Handle_OP_TributeItem;
ConnectedOpcodes[OP_TributeMoney] = &Client::Handle_OP_TributeMoney;
ConnectedOpcodes[OP_TributeNPC] = &Client::Handle_OP_TributeNPC;
ConnectedOpcodes[OP_TributeToggle] = &Client::Handle_OP_TributeToggle;
ConnectedOpcodes[OP_TributeUpdate] = &Client::Handle_OP_TributeUpdate;
ConnectedOpcodes[OP_VetClaimRequest] = &Client::Handle_OP_VetClaimRequest;
ConnectedOpcodes[OP_VoiceMacroIn] = &Client::Handle_OP_VoiceMacroIn;
ConnectedOpcodes[OP_UpdateAura] = &Client::Handle_OP_UpdateAura;
ConnectedOpcodes[OP_WearChange] = &Client::Handle_OP_WearChange;
ConnectedOpcodes[OP_WhoAllRequest] = &Client::Handle_OP_WhoAllRequest;
ConnectedOpcodes[OP_WorldUnknown001] = &Client::Handle_OP_Ignore;
ConnectedOpcodes[OP_XTargetAutoAddHaters] = &Client::Handle_OP_XTargetAutoAddHaters;
ConnectedOpcodes[OP_XTargetOpen] = &Client::Handle_OP_XTargetOpen;
ConnectedOpcodes[OP_XTargetRequest] = &Client::Handle_OP_XTargetRequest;
ConnectedOpcodes[OP_YellForHelp] = &Client::Handle_OP_YellForHelp;
ConnectedOpcodes[OP_ZoneChange] = &Client::Handle_OP_ZoneChange;
ConnectedOpcodes[OP_ResetAA] = &Client::Handle_OP_ResetAA;
ConnectedOpcodes[OP_UnderWorld] = &Client::Handle_OP_UnderWorld;
// shared tasks
ConnectedOpcodes[OP_SharedTaskRemovePlayer] = &Client::Handle_OP_SharedTaskRemovePlayer;
ConnectedOpcodes[OP_SharedTaskAddPlayer] = &Client::Handle_OP_SharedTaskAddPlayer;
ConnectedOpcodes[OP_SharedTaskMakeLeader] = &Client::Handle_OP_SharedTaskMakeLeader;
ConnectedOpcodes[OP_SharedTaskInviteResponse] = &Client::Handle_OP_SharedTaskInviteResponse;
ConnectedOpcodes[OP_SharedTaskAcceptNew] = &Client::Handle_OP_SharedTaskAccept;
ConnectedOpcodes[OP_SharedTaskQuit] = &Client::Handle_OP_SharedTaskQuit;
ConnectedOpcodes[OP_SharedTaskPlayerList] = &Client::Handle_OP_SharedTaskPlayerList;
}
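// ClearMappedOpcode unregisters a handler at runtime. Connected handlers live
// in a flat array indexed by opcode, so the slot is simply nulled; connecting
// handlers live in a std::map, so the entry is erased instead.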
void ClearMappedOpcode(EmuOpcode op)
{
if (op >= _maxEmuOpcode)
return;
ConnectedOpcodes[op] = nullptr;
auto iter = ConnectingOpcodes.find(op);
if (iter != ConnectingOpcodes.end()) {
ConnectingOpcodes.erase(iter);
}
}
// client methods
int Client::HandlePacket(const EQApplicationPacket *app)
{
if (LogSys.log_settings[Logs::LogCategory::Netcode].is_category_enabled == 1) {
char buffer[64];
app->build_header_dump(buffer);
Log(Logs::Detail, Logs::PacketClientServer, "Dispatch opcode: %s", buffer);
}
if (LogSys.log_settings[Logs::PacketClientServer].is_category_enabled == 1)
Log(Logs::General, Logs::PacketClientServer, "[%s - 0x%04x] [Size: %u]", OpcodeManager::EmuToName(app->GetOpcode()), app->GetOpcode(), app->Size());
if (LogSys.log_settings[Logs::PacketClientServerWithDump].is_category_enabled == 1)
Log(Logs::General, Logs::PacketClientServerWithDump, "[%s - 0x%04x] [Size: %u] %s", OpcodeManager::EmuToName(app->GetOpcode()), app->GetOpcode(), app->Size(), DumpPacketToString(app).c_str());
EmuOpcode opcode = app->GetOpcode();
if (opcode == OP_AckPacket) {
return true;
}
#if EQDEBUG >= 9
std::cout << "Received 0x" << std::hex << std::setw(4) << std::setfill('0') << opcode << ", size=" << std::dec << app->size << std::endl;
#endif
switch (client_state) {
case CLIENT_CONNECTING: {
if (ConnectingOpcodes.count(opcode) != 1) {
//The const_cast is unfortunate, but everything passed into Lua must be non-const, even when exposed as non-mutable
std::vector<EQ::Any> args;
args.push_back(const_cast<EQApplicationPacket*>(app));
parse->EventPlayer(EVENT_UNHANDLED_OPCODE, this, "", 1, &args);
break;
}
ClientPacketProc p;
p = ConnectingOpcodes[opcode];
//call the processing routine
(this->*p)(app);
//special case where connecting code needs to boot client...
if (client_state == CLIENT_KICKED) {
return(false);
}
break;
}
case CLIENT_CONNECTED: {
ClientPacketProc p;
p = ConnectedOpcodes[opcode];
if (p == nullptr) {
std::vector<EQ::Any> args;
args.push_back(const_cast<EQApplicationPacket*>(app));
parse->EventPlayer(EVENT_UNHANDLED_OPCODE, this, "", 0, &args);
if (LogSys.log_settings[Logs::PacketClientServerUnhandled].is_category_enabled == 1) {
char buffer[64];
app->build_header_dump(buffer);
Log(Logs::General, Logs::PacketClientServerUnhandled, "%s %s", buffer, DumpPacketToString(app).c_str());
}
break;
}
//call the processing routine
(this->*p)(app);
break;
}
case CLIENT_KICKED:
case DISCONNECTED:
case CLIENT_LINKDEAD:
break;
default:
LogDebug("Unknown client_state: [{}]\n", client_state);
break;
}
return(true);
}
// Finish client connecting state
void Client::CompleteConnect()
{
UpdateWho();
client_state = CLIENT_CONNECTED;
SendAllPackets();
hpupdate_timer.Start();
autosave_timer.Start();
SetDuelTarget(0);
SetDueling(false);
EnteringMessages(this);
LoadZoneFlags();
/* Sets GM Flag if needed & Sends Petition Queue */
UpdateAdmin(false);
// Task Packets
LoadClientTaskState();
if (IsInAGuild()) {
uint8 rank = GuildRank();
if (ClientVersion() >= EQ::versions::ClientVersion::RoF)
{
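// RoF+ clients appear to use an expanded guild-rank enum, so the legacy
// three ranks are remapped below: member (0) -> 5, officer (1) -> 3,
// leader (2) -> 1 (the "expanded enum" rationale is an assumption).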
switch (rank) {
case 0: { rank = 5; break; } // GUILD_MEMBER 0
case 1: { rank = 3; break; } // GUILD_OFFICER 1
case 2: { rank = 1; break; } // GUILD_LEADER 2
default: { break; } // GUILD_NONE
}
}
SendAppearancePacket(AT_GuildID, GuildID(), false);
SendAppearancePacket(AT_GuildRank, rank, false);
}
// moved to dbload and translators since we iterate there also .. keep m_pp values whatever they are when they get here
/*const auto sbs = EQ::spells::DynamicLookup(ClientVersion(), GetGM())->SpellbookSize;
for (uint32 spellInt = 0; spellInt < sbs; ++spellInt) {
if (m_pp.spell_book[spellInt] < 3 || m_pp.spell_book[spellInt] > EQ::spells::SPELL_ID_MAX)
m_pp.spell_book[spellInt] = 0xFFFFFFFF;
}*/
//SendAATable();
if (GetHideMe()) Message(Chat::Red, "[GM] You are currently hidden to all clients");
uint32 raidid = database.GetRaidID(GetName());
Raid *raid = nullptr;
if (raidid > 0) {
raid = entity_list.GetRaidByID(raidid);
if (!raid) {
raid = new Raid(raidid);
if (raid->GetID() != 0) {
entity_list.AddRaid(raid, raidid);
raid->LoadLeadership(); // Recreating raid in new zone, get leadership from DB
}
else {
safe_delete(raid);
}
}
if (raid) {
SetRaidGrouped(true);
raid->LearnMembers();
raid->VerifyRaid();
raid->GetRaidDetails();
/*
Only the leader should get this; send it to everyone for now until the
correct creation sequence is worked out. A "no longer leader" packet could
also be sent to non-leaders, but that is not important for now.
*/
raid->SendRaidCreate(this);
raid->SendMakeLeaderPacketTo(raid->leadername, this);
raid->SendRaidAdd(GetName(), this);
raid->SendBulkRaid(this);
raid->SendGroupUpdate(this);
raid->SendRaidMOTD(this);
if (raid->IsLeader(this)) { // We're a raid leader, lets update just in case!
raid->UpdateRaidAAs();
raid->SendAllRaidLeadershipAA();
}
uint32 grpID = raid->GetGroup(GetName());
if (grpID < 12) {
raid->SendRaidGroupRemove(GetName(), grpID);
raid->SendRaidGroupAdd(GetName(), grpID);
raid->CheckGroupMentor(grpID, this);
if (raid->IsGroupLeader(GetName())) { // group leader same thing!
raid->UpdateGroupAAs(raid->GetGroup(this));
raid->GroupUpdate(grpID, false);
}
}
raid->SendGroupLeadershipAA(this, grpID); // this may get sent an extra time ...
SetXTargetAutoMgr(raid->GetXTargetAutoMgr());
if (!GetXTargetAutoMgr()->empty())
SetDirtyAutoHaters();
if (raid->IsLocked())
raid->SendRaidLockTo(this);
raid->SendHPManaEndPacketsTo(this);
}
}
else {
Group *group = nullptr;
group = this->GetGroup();
if (group)
group->SendHPManaEndPacketsTo(this);
}
//bulk raid send in here eventually
//reapply some buffs
uint32 buff_count = GetMaxTotalSlots();
for (uint32 j1 = 0; j1 < buff_count; j1++) {
if (!IsValidSpell(buffs[j1].spellid))
continue;
const SPDat_Spell_Struct &spell = spells[buffs[j1].spellid];
int NimbusEffect = GetNimbusEffect(buffs[j1].spellid);
if (NimbusEffect) {
if (!IsNimbusEffectActive(NimbusEffect))
SendSpellEffect(NimbusEffect, 500, 0, 1, 3000, true);
}
for (int x1 = 0; x1 < EFFECT_COUNT; x1++) {
switch (spell.effectid[x1]) {
case SE_IllusionCopy:
case SE_Illusion: {
if (spell.base[x1] == -1) {
if (gender == 1)
gender = 0;
else if (gender == 0)
gender = 1;
SendIllusionPacket(GetRace(), gender, 0xFF, 0xFF);
}
else if (spell.base[x1] == -2) // Purpose unclear: re-applies the player's own race/gender illusion, but only for the original playable races (<= 12), Iksar (128), and Vah Shir (130)
{
if (GetRace() == 128 || GetRace() == 130 || GetRace() <= 12)
SendIllusionPacket(GetRace(), GetGender(), spell.base2[x1], spell.max[x1]);
}
else if (spell.max[x1] > 0)
{
SendIllusionPacket(spell.base[x1], 0xFF, spell.base2[x1], spell.max[x1]);
}
else
{
SendIllusionPacket(spell.base[x1], 0xFF, 0xFF, 0xFF);
}
switch (spell.base[x1]) {
case OGRE:
SendAppearancePacket(AT_Size, 9);
break;
case TROLL:
SendAppearancePacket(AT_Size, 8);
break;
case VAHSHIR:
case BARBARIAN:
SendAppearancePacket(AT_Size, 7);
break;
case HALF_ELF:
case WOOD_ELF:
case DARK_ELF:
case FROGLOK:
SendAppearancePacket(AT_Size, 5);
break;
case DWARF:
SendAppearancePacket(AT_Size, 4);
break;
case HALFLING:
case GNOME:
SendAppearancePacket(AT_Size, 3);
break;
default:
SendAppearancePacket(AT_Size, 6);
break;
}
break;
}
case SE_SummonHorse: {
SummonHorse(buffs[j1].spellid);
//hasmount = true; // this was previously false; unclear which is correct
break;
}
case SE_Silence:
{
Silence(true);
break;
}
case SE_Amnesia:
{
Amnesia(true);
break;
}
case SE_DivineAura:
{
invulnerable = true;
break;
}
case SE_Invisibility2:
case SE_Invisibility:
{
invisible = true;
SendAppearancePacket(AT_Invis, 1);
break;
}
case SE_Levitate:
{
if (!zone->CanLevitate())
{
if (!GetGM())
{
SendAppearancePacket(AT_Levitate, 0);
BuffFadeByEffect(SE_Levitate);
Message(Chat::Red, "You can't levitate in this zone.");
}
}
else {
SendAppearancePacket(AT_Levitate, 2);
}
break;
}
case SE_InvisVsUndead2:
case SE_InvisVsUndead:
{
invisible_undead = true;
break;
}
case SE_InvisVsAnimals:
{
invisible_animals = true;
break;
}
case SE_AddMeleeProc:
case SE_WeaponProc:
{
AddProcToWeapon(GetProcID(buffs[j1].spellid, x1), false, 100 + spells[buffs[j1].spellid].base2[x1], buffs[j1].spellid, buffs[j1].casterlevel);
break;
}
case SE_DefensiveProc:
{
AddDefensiveProc(GetProcID(buffs[j1].spellid, x1), 100 + spells[buffs[j1].spellid].base2[x1], buffs[j1].spellid);
break;
}
case SE_RangedProc:
{
AddRangedProc(GetProcID(buffs[j1].spellid, x1), 100 + spells[buffs[j1].spellid].base2[x1], buffs[j1].spellid);
break;
}
}
}
}
/* Sends appearances for all mobs not doing anim_stand aka sitting, looting, playing dead */
entity_list.SendZoneAppearance(this);
/* Sends the Nimbus particle effects (up to 3) for any mob using them */
entity_list.SendNimbusEffects(this);
entity_list.SendUntargetable(this);
int x;
for (x = EQ::textures::textureBegin; x <= EQ::textures::LastTexture; x++) {
SendWearChange(x);
}
// added due to wear change above
UpdateActiveLight();
SendAppearancePacket(AT_Light, GetActiveLightType());
Mob *pet = GetPet();
if (pet != nullptr) {
for (x = EQ::textures::textureBegin; x <= EQ::textures::LastTexture; x++) {
pet->SendWearChange(x);
}
// added due to wear change above
pet->UpdateActiveLight();
pet->SendAppearancePacket(AT_Light, pet->GetActiveLightType());
}
entity_list.SendTraders(this);
if (GetPet()) {
GetPet()->SendPetBuffsToClient();
}
if (GetGroup())
database.RefreshGroupFromDB(this);
if (RuleB(TaskSystem, EnableTaskSystem))
TaskPeriodic_Timer.Start();
else
TaskPeriodic_Timer.Disable();
conn_state = ClientConnectFinished;
//enforce some rules..
if (!CanBeInZone()) {
LogDebug("[CLIENT] Kicking char from zone, not allowed here");
GoToSafeCoords(ZoneID("arena"), 0);
return;
}
if (zone)
zone->weatherSend(this);
TotalKarma = database.GetKarma(AccountID());
SendDisciplineTimers();
parse->EventPlayer(EVENT_ENTER_ZONE, this, "", 0);
SetLastPositionBeforeBulkUpdate(GetPosition());
/* This sub event is for if a player logs in for the first time since entering world. */
if (firstlogon == 1) {
parse->EventPlayer(EVENT_CONNECT, this, "", 0);
/* QS: PlayerLogConnectDisconnect */
if (RuleB(QueryServ, PlayerLogConnectDisconnect)) {
std::string event_desc = StringFormat("Connect :: Logged into zoneid:%i instid:%i", this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Connect_State, this->CharacterID(), event_desc);
}
/**
* Update last_login now; it otherwise isn't written until a later save,
* and we want online status to be current.
*/
database.QueryDatabase(
StringFormat(
"UPDATE `character_data` SET `last_login` = UNIX_TIMESTAMP() WHERE id = %u",
CharacterID()
)
);
}
if (zone && zone->GetInstanceTimer()) {
bool is_permanent = false;
uint32 remaining_time_seconds = database.GetTimeRemainingInstance(zone->GetInstanceID(), is_permanent);
uint32 day = (remaining_time_seconds / 86400);
uint32 hour = (remaining_time_seconds / 3600) % 24;
uint32 minute = (remaining_time_seconds / 60) % 60;
uint32 second = (remaining_time_seconds / 1) % 60;
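// Worked example of the breakdown above: remaining_time_seconds = 90061
// gives day = 1 (90061 / 86400), hour = 1 ((90061 / 3600) % 24),
// minute = 1 ((90061 / 60) % 60), second = 1 (90061 % 60).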
if (day) {
Message(
Chat::Yellow, "%s (%u) will expire in %u days, %u hours, %u minutes, and %u seconds.",
zone->GetLongName(), zone->GetInstanceID(), day, hour, minute, second
);
}
else if (hour) {
Message(
Chat::Yellow, "%s (%u) will expire in %u hours, %u minutes, and %u seconds.",
zone->GetLongName(), zone->GetInstanceID(), hour, minute, second
);
}
else if (minute) {
Message(
Chat::Yellow, "%s (%u) will expire in %u minutes, and %u seconds.",
zone->GetLongName(), zone->GetInstanceID(), minute, second
);
}
else {
Message(
Chat::Yellow, "%s (%u) will expire in in %u seconds.",
zone->GetLongName(), zone->GetInstanceID(), second
);
}
}
SendRewards();
SendAltCurrencies();
database.LoadAltCurrencyValues(CharacterID(), alternate_currency);
SendAlternateCurrencyValues();
alternate_currency_loaded = true;
ProcessAlternateCurrencyQueue();
/* This needs to be set, this determines whether or not data was loaded properly before a save */
client_data_loaded = true;
CalcItemScale();
DoItemEnterZone();
if (zone->GetZoneID() == RuleI(World, GuildBankZoneID) && GuildBanks)
GuildBanks->SendGuildBank(this);
if (ClientVersion() >= EQ::versions::ClientVersion::SoD)
entity_list.SendFindableNPCList(this);
if (IsInAGuild()) {
SendGuildRanks();
guild_mgr.SendGuildMemberUpdateToWorld(GetName(), GuildID(), zone->GetZoneID(), time(nullptr));
guild_mgr.RequestOnlineGuildMembers(this->CharacterID(), this->GuildID());
}
SendDynamicZoneUpdates();
/** Request adventure info **/
auto pack = new ServerPacket(ServerOP_AdventureDataRequest, 64);
strcpy((char*)pack->pBuffer, GetName());
worldserver.SendPacket(pack);
delete pack;
if (IsClient() && CastToClient()->ClientVersionBit() & EQ::versions::maskUFAndLater) {
EQApplicationPacket *outapp = MakeBuffsPacket(false);
CastToClient()->FastQueuePacket(&outapp);
}
// TODO: load these states
// We at least will set them to the correct state for now
if (m_ClientVersionBit & EQ::versions::maskUFAndLater && GetPet()) {
SetPetCommandState(PET_BUTTON_SIT, 0);
SetPetCommandState(PET_BUTTON_STOP, 0);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
SetPetCommandState(PET_BUTTON_FOLLOW, 1);
SetPetCommandState(PET_BUTTON_GUARD, 0);
// Taunt is saved client-side for logging on with a pet,
// and in our DB for when we zone.
SetPetCommandState(PET_BUTTON_HOLD, 0);
SetPetCommandState(PET_BUTTON_GHOLD, 0);
SetPetCommandState(PET_BUTTON_FOCUS, 0);
SetPetCommandState(PET_BUTTON_SPELLHOLD, 0);
}
database.LoadAuras(this); // this ends up spawning them so probably safer to load this later (here)
entity_list.RefreshClientXTargets(this);
worldserver.RequestTellQueue(GetName());
entity_list.ScanCloseMobs(close_mobs, this, true);
if (GetGM() && IsDevToolsEnabled()) {
ShowDevToolsMenu();
}
// shared tasks memberlist
if (GetTaskState()->HasActiveSharedTask()) {
// struct
auto p = new ServerPacket(
ServerOP_SharedTaskRequestMemberlist,
sizeof(ServerSharedTaskRequestMemberlist_Struct)
);
auto *r = (ServerSharedTaskRequestMemberlist_Struct *) p->pBuffer;
// fill
r->source_character_id = CharacterID();
r->task_id = GetTaskState()->GetActiveSharedTask().task_id;
// send
worldserver.SendPacket(p);
safe_delete(p);
}
}
// connecting opcode handlers
/*
void Client::Handle_Connect_0x3e33(const EQApplicationPacket *app)
{
//OP_0x0380 = 0x642c
EQApplicationPacket* outapp = new EQApplicationPacket(OP_0x0380, sizeof(uint32)); // Dunno
QueuePacket(outapp);
safe_delete(outapp);
return;
}
*/
void Client::Handle_Connect_OP_ApproveZone(const EQApplicationPacket *app)
{
if (app->size != sizeof(ApproveZone_Struct)) {
LogError("Invalid size on OP_ApproveZone: Expected [{}], Got [{}]",
sizeof(ApproveZone_Struct), app->size);
return;
}
ApproveZone_Struct* azone = (ApproveZone_Struct*)app->pBuffer;
azone->approve = 1;
QueuePacket(app);
return;
}
void Client::Handle_Connect_OP_ClientError(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClientError_Struct)) {
LogError("Invalid size on OP_ClientError: Expected [{}], Got [{}]",
sizeof(ClientError_Struct), app->size);
return;
}
// Client reporting error to server
ClientError_Struct* error = (ClientError_Struct*)app->pBuffer;
LogError("Client error: [{}]", error->character_name);
LogError("Error message: [{}]", error->message);
Message(Chat::Red, error->message);
#if (EQDEBUG>=5)
DumpPacket(app);
#endif
return;
}
void Client::Handle_Connect_OP_ClientReady(const EQApplicationPacket *app)
{
conn_state = ClientReadyReceived;
if (!Spawned())
SendZoneInPackets();
CompleteConnect();
SendHPUpdate();
}
void Client::Handle_Connect_OP_ClientUpdate(const EQApplicationPacket *app)
{
//Once we get this, the client thinks it is connected
//So give it the benefit of the doubt and move to connected
Handle_Connect_OP_ClientReady(app);
}
void Client::Handle_Connect_OP_ReqClientSpawn(const EQApplicationPacket *app)
{
conn_state = ClientSpawnRequested;
auto outapp = new EQApplicationPacket;
// Send Zone Doors
if (entity_list.MakeDoorSpawnPacket(outapp, this))
{
QueuePacket(outapp);
}
safe_delete(outapp);
// Send Zone Objects
entity_list.SendZoneObjects(this);
SendZonePoints();
// Live does this
outapp = new EQApplicationPacket(OP_SendAAStats, 0);
FastQueuePacket(&outapp);
// Tell the client it can continue; we're done
outapp = new EQApplicationPacket(OP_ZoneServerReady, 0);
FastQueuePacket(&outapp);
outapp = new EQApplicationPacket(OP_SendExpZonein, 0);
FastQueuePacket(&outapp);
if (ClientVersion() >= EQ::versions::ClientVersion::RoF)
{
outapp = new EQApplicationPacket(OP_ClientReady, 0);
FastQueuePacket(&outapp);
}
// New for Secrets of Faydwer - Used in Place of OP_SendExpZonein
outapp = new EQApplicationPacket(OP_WorldObjectsSent, 0);
QueuePacket(outapp);
safe_delete(outapp);
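// Prefix match: any zone whose short name starts with "bazaar" (6 chars)
// gets the welcome message, not just the zone named exactly "bazaar".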
if (strncasecmp(zone->GetShortName(), "bazaar", 6) == 0)
SendBazaarWelcome();
conn_state = ZoneContentsSent;
return;
}
void Client::Handle_Connect_OP_ReqNewZone(const EQApplicationPacket *app)
{
conn_state = NewZoneRequested;
EQApplicationPacket* outapp = nullptr;
/////////////////////////////////////
// New Zone Packet
outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
NewZone_Struct* nz = (NewZone_Struct*)outapp->pBuffer;
memcpy(outapp->pBuffer, &zone->newzone_data, sizeof(NewZone_Struct));
strcpy(nz->char_name, m_pp.name);
// This previously used FastQueuePacket and the packet was never sent.
// Possibly a timing issue; the NewZone packet was never logged until it
// was switched to QueuePacket with an explicit priority.
outapp->priority = 6;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
void Client::Handle_Connect_OP_SendAAStats(const EQApplicationPacket *app)
{
SendAlternateAdvancementTimers();
auto outapp = new EQApplicationPacket(OP_SendAAStats, 0);
QueuePacket(outapp);
safe_delete(outapp);
return;
}
void Client::Handle_Connect_OP_SendAATable(const EQApplicationPacket *app)
{
SendAlternateAdvancementTable();
return;
}
void Client::Handle_Connect_OP_SendExpZonein(const EQApplicationPacket *app)
{
auto outapp = new EQApplicationPacket(OP_SendExpZonein, 0);
QueuePacket(outapp);
safe_delete(outapp);
// SoF+ Gets Zone-In packets after sending OP_WorldObjectsSent
if (ClientVersion() < EQ::versions::ClientVersion::SoF)
{
SendZoneInPackets();
}
return;
}
void Client::Handle_Connect_OP_SendGuildTributes(const EQApplicationPacket *app)
{
SendGuildTributes();
return;
}
void Client::Handle_Connect_OP_SendTributes(const EQApplicationPacket *app)
{
SendTributes();
return;
}
void Client::Handle_Connect_OP_SetServerFilter(const EQApplicationPacket *app)
{
if (app->size != sizeof(SetServerFilter_Struct)) {
LogError("Received invalid sized OP_SetServerFilter");
DumpPacket(app);
return;
}
SetServerFilter_Struct* filter = (SetServerFilter_Struct*)app->pBuffer;
ServerFilter(filter);
return;
}
void Client::Handle_Connect_OP_SpawnAppearance(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_Connect_OP_TGB(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
LogError("Invalid size on OP_TGB: Expected [{}], Got [{}]",
sizeof(uint32), app->size);
return;
}
OPTGB(app);
return;
}
void Client::Handle_Connect_OP_UpdateAA(const EQApplicationPacket *app)
{
SendAlternateAdvancementPoints();
}
void Client::Handle_Connect_OP_WearChange(const EQApplicationPacket *app)
{
//not sure what these are supposed to mean to us.
return;
}
void Client::Handle_Connect_OP_WorldObjectsSent(const EQApplicationPacket *app)
{
// New for SoF+
auto outapp = new EQApplicationPacket(OP_WorldObjectsSent, 0);
QueuePacket(outapp);
safe_delete(outapp);
// Packet order changed for SoF+, so the below is sent here instead of in the OP_SendExpZonein handler
SendZoneInPackets();
if (RuleB(Mercs, AllowMercs))
{
SpawnMercOnZone();
}
return;
}
void Client::Handle_Connect_OP_ZoneComplete(const EQApplicationPacket *app)
{
auto outapp = new EQApplicationPacket(OP_0x0347, 0);
QueuePacket(outapp);
safe_delete(outapp);
return;
}
void Client::Handle_Connect_OP_ZoneEntry(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClientZoneEntry_Struct))
return;
ClientZoneEntry_Struct *cze = (ClientZoneEntry_Struct *)app->pBuffer;
if (strlen(cze->char_name) > 63)
return;
conn_state = ReceivedZoneEntry;
SetClientVersion(Connection()->ClientVersion());
m_ClientVersionBit = EQ::versions::ConvertClientVersionToClientVersionBit(Connection()->ClientVersion());
m_pp.SetPlayerProfileVersion(m_ClientVersion);
m_inv.SetInventoryVersion(m_ClientVersion);
/* Anti-ghost code:
the temp var is looked up before this client's name is set, so the search doesn't find this object
*/
Client* client = entity_list.GetClientByName(cze->char_name);
if (!zone->GetAuth(ip, cze->char_name, &WID, &account_id, &character_id, &admin, lskey, &tellsoff)) {
LogClientLogin("[{}] failed zone auth check", cze->char_name);
if (nullptr != client) {
client->Save();
client->Kick("Failed auth check");
}
return;
}
strcpy(name, cze->char_name);
/* Check for Client Spoofing */
if (client != 0) {
struct in_addr ghost_addr;
ghost_addr.s_addr = eqs->GetRemoteIP();
LogError("Ghosting client: Account ID:[{}] Name:[{}] Character:[{}] IP:[{}]",
client->AccountID(), client->AccountName(), client->GetName(), inet_ntoa(ghost_addr));
client->Save();
client->Disconnect();
}
uint32 pplen = 0;
EQApplicationPacket* outapp = nullptr;
MYSQL_RES* result = nullptr;
bool loaditems = false;
uint32 i;
std::string query;
unsigned long* lengths = nullptr;
uint32 cid = CharacterID();
character_id = cid; /* Global character_id reference */
/* Flush and reload factions */
database.RemoveTempFactions(this);
database.LoadCharacterFactionValues(cid, factionvalues);
/* Load Character Account Data: Temp until I move */
query = StringFormat("SELECT `status`, `name`, `ls_id`, `lsaccount_id`, `gmspeed`, `revoked`, `hideme`, `time_creation` FROM `account` WHERE `id` = %u", this->AccountID());
auto results = database.QueryDatabase(query);
for (auto row = results.begin(); row != results.end(); ++row) {
admin = atoi(row[0]);
strn0cpy(account_name, row[1], sizeof(account_name));
strn0cpy(loginserver, row[2], sizeof(loginserver));
lsaccountid = atoi(row[3]);
gmspeed = atoi(row[4]);
revoked = atoi(row[5]);
gm_hide_me = atoi(row[6]);
account_creation = atoul(row[7]);
}
/* Load Character Data */
query = StringFormat("SELECT `lfp`, `lfg`, `xtargets`, `firstlogon`, `guild_id`, `rank` FROM `character_data` LEFT JOIN `guild_members` ON `id` = `char_id` WHERE `id` = %i", cid);
results = database.QueryDatabase(query);
for (auto row = results.begin(); row != results.end(); ++row) {
if (row[4] && atoi(row[4]) > 0) {
guild_id = atoi(row[4]);
if (row[5] != nullptr) { guildrank = atoi(row[5]); }
else { guildrank = GUILD_RANK_NONE; }
}
if (LFP) { LFP = atoi(row[0]); }
if (LFG) { LFG = atoi(row[1]); }
if (row[3])
firstlogon = atoi(row[3]);
}
if (RuleB(Character, SharedBankPlat))
m_pp.platinum_shared = database.GetSharedPlatinum(this->AccountID());
database.ClearOldRecastTimestamps(cid); /* Clear out our old recast timestamps to keep the DB clean */
// Set to full support in case they're a GM with items in disabled expansion slots but with their GM flag off;
// item loss will occur when they use the 'empty' slots if this is not done.
m_inv.SetGMInventory(true);
loaditems = database.GetInventory(cid, &m_inv); /* Load Character Inventory */
database.LoadCharacterBandolier(cid, &m_pp); /* Load Character Bandolier */
database.LoadCharacterBindPoint(cid, &m_pp); /* Load Character Bind */
database.LoadCharacterMaterialColor(cid, &m_pp); /* Load Character Material */
database.LoadCharacterPotions(cid, &m_pp); /* Load Character Potion Belt */
database.LoadCharacterCurrency(cid, &m_pp); /* Load Character Currency into PP */
database.LoadCharacterData(cid, &m_pp, &m_epp); /* Load Character Data from DB into PP as well as E_PP */
database.LoadCharacterSkills(cid, &m_pp); /* Load Character Skills */
database.LoadCharacterInspectMessage(cid, &m_inspect_message); /* Load Character Inspect Message */
database.LoadCharacterSpellBook(cid, &m_pp); /* Load Character Spell Book */
database.LoadCharacterMemmedSpells(cid, &m_pp); /* Load Character Memorized Spells */
database.LoadCharacterDisciplines(cid, &m_pp); /* Load Character Disciplines */
database.LoadCharacterLanguages(cid, &m_pp); /* Load Character Languages */
database.LoadCharacterLeadershipAA(cid, &m_pp); /* Load Character Leadership AA's */
database.LoadCharacterTribute(cid, &m_pp); /* Load CharacterTribute */
/* Load AdventureStats */
AdventureStats_Struct as;
if (database.GetAdventureStats(cid, &as))
{
m_pp.ldon_wins_guk = as.success.guk;
m_pp.ldon_wins_mir = as.success.mir;
m_pp.ldon_wins_mmc = as.success.mmc;
m_pp.ldon_wins_ruj = as.success.ruj;
m_pp.ldon_wins_tak = as.success.tak;
m_pp.ldon_losses_guk = as.failure.guk;
m_pp.ldon_losses_mir = as.failure.mir;
m_pp.ldon_losses_mmc = as.failure.mmc;
m_pp.ldon_losses_ruj = as.failure.ruj;
m_pp.ldon_losses_tak = as.failure.tak;
}
/* Set item material tint */
for (int i = EQ::textures::textureBegin; i <= EQ::textures::LastTexture; i++)
{
if (m_pp.item_tint.Slot[i].UseTint == 1 || m_pp.item_tint.Slot[i].UseTint == 255)
{
m_pp.item_tint.Slot[i].UseTint = 0xFF;
}
}
if (level) { level = m_pp.level; }
/* If GM, not trackable */
if (gm_hide_me) { trackable = false; }
/* Set Con State for Reporting */
conn_state = PlayerProfileLoaded;
m_pp.zone_id = zone->GetZoneID();
m_pp.zoneInstance = zone->GetInstanceID();
/* Set Total Seconds Played */
TotalSecondsPlayed = m_pp.timePlayedMin * 60;
/* If we can maintain intoxication across zones, check for it */
if (!RuleB(Character, MaintainIntoxicationAcrossZones))
m_pp.intoxication = 0;
strcpy(name, m_pp.name);
strcpy(lastname, m_pp.last_name);
/* If PP is set to weird coordinates */
if ((m_pp.x == -1 && m_pp.y == -1 && m_pp.z == -1) || (m_pp.x == -2 && m_pp.y == -2 && m_pp.z == -2)) {
auto zone_safe_point = zone->GetSafePoint();
m_pp.x = zone_safe_point.x;
m_pp.y = zone_safe_point.y;
m_pp.z = zone_safe_point.z;
m_pp.heading = zone_safe_point.w;
}
/* If too far below ground, then fix */
// float ground_z = GetGroundZ(m_pp.x, m_pp.y, m_pp.z);
// if (m_pp.z < (ground_z - 500))
// m_pp.z = ground_z;
/* Set Mob variables for spawn */
class_ = m_pp.class_;
level = m_pp.level;
m_Position.x = m_pp.x;
m_Position.y = m_pp.y;
m_Position.z = m_pp.z;
m_Position.w = m_pp.heading;
race = m_pp.race;
base_race = m_pp.race;
gender = m_pp.gender;
base_gender = m_pp.gender;
deity = m_pp.deity;
haircolor = m_pp.haircolor;
beardcolor = m_pp.beardcolor;
eyecolor1 = m_pp.eyecolor1;
eyecolor2 = m_pp.eyecolor2;
hairstyle = m_pp.hairstyle;
luclinface = m_pp.face;
beard = m_pp.beard;
drakkin_heritage = m_pp.drakkin_heritage;
drakkin_tattoo = m_pp.drakkin_tattoo;
drakkin_details = m_pp.drakkin_details;
// Max Level for Character:PerCharacterQglobalMaxLevel and Character:PerCharacterBucketMaxLevel
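// The QGlobal rule takes precedence when both rules are enabled; a value of
// 0 is assumed here to mean "no per-character cap".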
int client_max_level = 0;
if (RuleB(Character, PerCharacterQglobalMaxLevel)) {
client_max_level = GetCharMaxLevelFromQGlobal();
} else if (RuleB(Character, PerCharacterBucketMaxLevel)) {
client_max_level = GetCharMaxLevelFromBucket();
}
SetClientMaxLevel(client_max_level);
// we know our class now, so we might have to fix our consume timer!
if (class_ == MONK)
consume_food_timer.SetTimer(CONSUMPTION_MNK_TIMER);
InitInnates();
/* If GM not set in DB, and does not meet min status to be GM, reset */
if (m_pp.gm && admin < minStatusToBeGM)
m_pp.gm = 0;
/* Load Guild */
if (!IsInAGuild()) {
m_pp.guild_id = GUILD_NONE;
}
else {
m_pp.guild_id = GuildID();
uint8 rank = guild_mgr.GetDisplayedRank(GuildID(), GuildRank(), CharacterID());
// FIXME: RoF guild rank
if (ClientVersion() >= EQ::versions::ClientVersion::RoF) {
switch (rank) {
case 0:
rank = 5;
break;
case 1:
rank = 3;
break;
case 2:
rank = 1;
break;
default:
break;
}
}
m_pp.guildrank = rank;
if (zone->GetZoneID() == RuleI(World, GuildBankZoneID))
GuildBanker = (guild_mgr.IsGuildLeader(GuildID(), CharacterID()) || guild_mgr.GetBankerFlag(CharacterID()));
}
m_pp.guildbanker = GuildBanker;
switch (race)
{
case OGRE:
size = 9; break;
case TROLL:
size = 8; break;
case VAHSHIR: case BARBARIAN:
size = 7; break;
case HUMAN: case HIGH_ELF: case ERUDITE: case IKSAR: case DRAKKIN:
size = 6; break;
case HALF_ELF:
size = 5.5; break;
case WOOD_ELF: case DARK_ELF: case FROGLOK:
size = 5; break;
case DWARF:
size = 4; break;
case HALFLING:
size = 3.5; break;
case GNOME:
size = 3; break;
default:
size = 0;
}
/* Check for Invalid points */
if (m_pp.ldon_points_guk < 0 || m_pp.ldon_points_guk > 2000000000) { m_pp.ldon_points_guk = 0; }
if (m_pp.ldon_points_mir < 0 || m_pp.ldon_points_mir > 2000000000) { m_pp.ldon_points_mir = 0; }
if (m_pp.ldon_points_mmc < 0 || m_pp.ldon_points_mmc > 2000000000) { m_pp.ldon_points_mmc = 0; }
if (m_pp.ldon_points_ruj < 0 || m_pp.ldon_points_ruj > 2000000000) { m_pp.ldon_points_ruj = 0; }
if (m_pp.ldon_points_tak < 0 || m_pp.ldon_points_tak > 2000000000) { m_pp.ldon_points_tak = 0; }
if (m_pp.ldon_points_available < 0 || m_pp.ldon_points_available > 2000000000) { m_pp.ldon_points_available = 0; }
if (RuleB(World, UseClientBasedExpansionSettings)) {
m_pp.expansions = EQ::expansions::ConvertClientVersionToExpansionsMask(ClientVersion());
}
else {
m_pp.expansions = (RuleI(World, ExpansionSettings) & EQ::expansions::ConvertClientVersionToExpansionsMask(ClientVersion()));
}
if (!database.LoadAlternateAdvancement(this)) {
LogError("Error loading AA points for [{}]", GetName());
}
if (SPDAT_RECORDS > 0) {
for (uint32 z = 0; z < EQ::spells::SPELL_GEM_COUNT; z++) {
if (m_pp.mem_spells[z] >= (uint32)SPDAT_RECORDS)
UnmemSpell(z, false);
}
database.LoadBuffs(this);
uint32 max_slots = GetMaxBuffSlots();
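// Pack the live buff state into the player profile for the client. In this
// block, effect_type 2 appears to mark an occupied slot and 0 an empty one,
// and 0x2211 is a placeholder caster entity id (assumptions based on usage).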
for (int i = 0; i < BUFF_COUNT; i++) {
if (buffs[i].spellid != SPELL_UNKNOWN) {
m_pp.buffs[i].spellid = buffs[i].spellid;
m_pp.buffs[i].bard_modifier = buffs[i].instrument_mod;
m_pp.buffs[i].effect_type = 2;
m_pp.buffs[i].player_id = 0x2211;
m_pp.buffs[i].level = buffs[i].casterlevel;
m_pp.buffs[i].unknown003 = 0;
m_pp.buffs[i].duration = buffs[i].ticsremaining;
m_pp.buffs[i].counters = buffs[i].counters;
m_pp.buffs[i].num_hits = buffs[i].numhits;
}
else {
m_pp.buffs[i].spellid = SPELLBOOK_UNKNOWN;
m_pp.buffs[i].bard_modifier = 10;
m_pp.buffs[i].effect_type = 0;
m_pp.buffs[i].player_id = 0;
m_pp.buffs[i].level = 0;
m_pp.buffs[i].unknown003 = 0;
m_pp.buffs[i].duration = 0;
m_pp.buffs[i].counters = 0;
m_pp.buffs[i].num_hits = 0;
}
}
}
/* Load Character Key Ring */
KeyRingLoad();
/* Send Group Members via PP */
uint32 groupid = database.GetGroupID(GetName());
Group* group = nullptr;
if (groupid > 0) {
group = entity_list.GetGroupByID(groupid);
if (!group) { //nobody from our group is here... start a new group
group = new Group(groupid);
if (group->GetID() != 0)
entity_list.AddGroup(group, groupid);
else //error loading group members...
{
delete group;
group = nullptr;
}
} //else, somebody from our group is already here...
if (!group)
database.SetGroupID(GetName(), 0, CharacterID(), false); //cannot re-establish group, kill it
}
else { //no group id
//clear out the group junk in our PP
uint32 xy = 0;
for (xy = 0; xy < MAX_GROUP_MEMBERS; xy++)
memset(m_pp.groupMembers[xy], 0, 64);
}
if (group) {
// If the group leader is not set, pull the group leader information from the database.
if (!group->GetLeader()) {
char ln[64];
char MainTankName[64];
char AssistName[64];
char PullerName[64];
char NPCMarkerName[64];
char mentoree_name[64];
int mentor_percent;
GroupLeadershipAA_Struct GLAA;
memset(ln, 0, 64);
database.GetGroupLeadershipInfo(group->GetID(), ln, MainTankName, AssistName, PullerName, NPCMarkerName, mentoree_name, &mentor_percent, &GLAA);
Client *c = entity_list.GetClientByName(ln);
if (c)
group->SetLeader(c);
group->SetMainTank(MainTankName);
group->SetMainAssist(AssistName);
group->SetPuller(PullerName);
group->SetNPCMarker(NPCMarkerName);
group->SetGroupAAs(&GLAA);
group->SetGroupMentor(mentor_percent, mentoree_name);
//group->NotifyMainTank(this, 1);
//group->NotifyMainAssist(this, 1);
//group->NotifyPuller(this, 1);
// If we are the leader, force an update of our group AAs to other members in the zone, in case
// we purchased a new one while out-of-zone.
if (group->IsLeader(this))
group->SendLeadershipAAUpdate();
}
JoinGroupXTargets(group);
group->UpdatePlayer(this);
LFG = false;
}
#ifdef BOTS
database.botdb.LoadOwnerOptions(this);
// TODO: mod below function for loading spawned botgroups
Bot::LoadAndSpawnAllZonedBots(this);
#endif
m_inv.SetGMInventory((bool)m_pp.gm); // set to current gm state for calc
CalcBonuses();
if (RuleB(Zone, EnableLoggedOffReplenishments) &&
time(nullptr) - m_pp.lastlogin >= RuleI(Zone, MinOfflineTimeToReplenishments)) {
m_pp.cur_hp = GetMaxHP();
m_pp.mana = GetMaxMana();
m_pp.endurance = GetMaxEndurance();
}
if (m_pp.cur_hp <= 0)
m_pp.cur_hp = GetMaxHP();
SetHP(m_pp.cur_hp);
Mob::SetMana(m_pp.mana); // mob function doesn't send the packet
SetEndurance(m_pp.endurance);
/* Update LFP in case any (or all) of our group disbanded while we were zoning. */
if (IsLFP()) { UpdateLFP(); }
p_timers.SetCharID(CharacterID());
if (!p_timers.Load(&database)) {
LogError("Unable to load ability timers from the database for [{}] ([{}])!", GetCleanName(), CharacterID());
}
/* Load Spell Slot Refresh from Currently Memorized Spells */
for (unsigned int i = 0; i < EQ::spells::SPELL_GEM_COUNT; ++i)
if (IsValidSpell(m_pp.mem_spells[i]))
m_pp.spellSlotRefresh[i] = p_timers.GetRemainingTime(pTimerSpellStart + m_pp.mem_spells[i]) * 1000;
/* Ability slot refresh send SK/PAL */
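// Note: 15300 seconds is 4 hours 15 minutes; remaining times at or above
// that are treated as invalid and the refresh is zeroed (assumption based
// on the bounds check below).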
if (m_pp.class_ == SHADOWKNIGHT || m_pp.class_ == PALADIN) {
uint32 abilitynum = 0;
if (m_pp.class_ == SHADOWKNIGHT) { abilitynum = pTimerHarmTouch; }
else { abilitynum = pTimerLayHands; }
uint32 remaining = p_timers.GetRemainingTime(abilitynum);
if (remaining > 0 && remaining < 15300)
m_pp.abilitySlotRefresh = remaining * 1000;
else
m_pp.abilitySlotRefresh = 0;
}
#ifdef _EQDEBUG
printf("Dumping inventory on load:\n");
m_inv.dumpEntireInventory();
#endif
/* Reset to max so they don't drown on zone-in if it's underwater */
m_pp.air_remaining = 60;
/* Check for PVP Zone status*/
if (zone->IsPVPZone())
m_pp.pvp = 1;
/* Time entitled on Account: Move to account */
m_pp.timeentitledonaccount = database.GetTotalTimeEntitledOnAccount(AccountID()) / 1440;
/* Reset rest timer if the durations have been lowered in the database */
if ((m_pp.RestTimer > RuleI(Character, RestRegenTimeToActivate)) && (m_pp.RestTimer > RuleI(Character, RestRegenRaidTimeToActivate)))
m_pp.RestTimer = 0;
/* This checksum should disappear once dynamic structs are in... each struct strategy will do it */ // looks to be in place now
//CRC32::SetEQChecksum((unsigned char*)&m_pp, sizeof(PlayerProfile_Struct) - sizeof(m_pp.m_player_profile_version) - 4);
// m_pp.checksum = 0; // All server out-bound player profile packets are now translated - no need to waste cycles calculating this...
outapp = new EQApplicationPacket(OP_PlayerProfile, sizeof(PlayerProfile_Struct));
/* The entityid field in the Player Profile is used by the Client in relation to Group Leadership AA */
m_pp.entityid = GetID();
memcpy(outapp->pBuffer, &m_pp, outapp->size);
outapp->priority = 6;
FastQueuePacket(&outapp);
if (m_pp.RestTimer)
rest_timer.Start(m_pp.RestTimer * 1000);
/* Load Pet */
database.LoadPetInfo(this);
if (m_petinfo.SpellID > 1 && !GetPet() && m_petinfo.SpellID <= SPDAT_RECORDS) {
MakePoweredPet(m_petinfo.SpellID, spells[m_petinfo.SpellID].teleport_zone, m_petinfo.petpower, m_petinfo.Name, m_petinfo.size);
if (GetPet() && GetPet()->IsNPC()) {
NPC *pet = GetPet()->CastToNPC();
pet->SetPetState(m_petinfo.Buffs, m_petinfo.Items);
pet->CalcBonuses();
pet->SetHP(m_petinfo.HP);
pet->SetMana(m_petinfo.Mana);
// Taunt persists when zoning on newer clients, overwrite default.
if (m_ClientVersionBit & EQ::versions::maskUFAndLater) {
if (!firstlogon) {
pet->SetTaunting(m_petinfo.taunting);
}
}
}
m_petinfo.SpellID = 0;
}
/* Moved here so it's after where we load the pet data. */
if (!aabonuses.ZoneSuspendMinion && !spellbonuses.ZoneSuspendMinion && !itembonuses.ZoneSuspendMinion) {
memset(&m_suspendedminion, 0, sizeof(PetInfo));
}
/* Server Zone Entry Packet */
outapp = new EQApplicationPacket(OP_ZoneEntry, sizeof(ServerZoneEntry_Struct));
ServerZoneEntry_Struct* sze = (ServerZoneEntry_Struct*)outapp->pBuffer;
FillSpawnStruct(&sze->player, CastToMob());
sze->player.spawn.curHp = 1;
sze->player.spawn.NPC = 0;
sze->player.spawn.z += 6; //arbitrary lift, seems to help spawning under zone.
outapp->priority = 6;
FastQueuePacket(&outapp);
/* Zone Spawns Packet */
entity_list.SendZoneSpawnsBulk(this);
entity_list.SendZoneCorpsesBulk(this);
entity_list.SendZonePVPUpdates(this); //hack until spawn struct is fixed.
/* Time of Day packet */
outapp = new EQApplicationPacket(OP_TimeOfDay, sizeof(TimeOfDay_Struct));
TimeOfDay_Struct* tod = (TimeOfDay_Struct*)outapp->pBuffer;
zone->zone_time.GetCurrentEQTimeOfDay(time(0), tod);
outapp->priority = 6;
FastQueuePacket(&outapp);
/* Tribute Packets */
DoTributeUpdate();
if (m_pp.tribute_active) {
//restart the tribute timer where we left off
tribute_timer.Start(m_pp.tribute_time_remaining);
}
/*
Character Inventory Packet
This is not quite where Live sends inventory; it does so after tribute
*/
if (loaditems) { /* Don't load if a length error occurs */
if (admin >= minStatusToBeGM)
m_inv.SetGMInventory(true); // set to true to allow expansion-restricted packets through
BulkSendInventoryItems();
/* Send stuff on the cursor which isn't sent in bulk */
for (auto iter = m_inv.cursor_cbegin(); iter != m_inv.cursor_cend(); ++iter) {
/* First item cursor is sent in bulk inventory packet */
if (iter == m_inv.cursor_cbegin())
continue;
const EQ::ItemInstance *inst = *iter;
SendItemPacket(EQ::invslot::slotCursor, inst, ItemPacketLimbo);
}
// This is somewhat hackish at the moment; this process needs to be realigned to allow a contiguous flow.
m_inv.SetGMInventory((bool)m_pp.gm); // reset back to current gm state
}
ApplyWeaponsStance();
auto dynamic_zone_member_entries = DynamicZoneMembersRepository::GetWhere(database,
fmt::format("character_id = {}", CharacterID()));
for (const auto& entry : dynamic_zone_member_entries)
{
m_dynamic_zone_ids.emplace_back(entry.dynamic_zone_id);
}
m_expedition_id = ExpeditionsRepository::GetIDByMemberID(database, CharacterID());
auto dz = zone->GetDynamicZone();
if (dz && dz->GetSafeReturnLocation().zone_id != 0)
{
auto safereturn = dz->GetSafeReturnLocation();
auto safereturn_entry = CharacterInstanceSafereturnsRepository::NewEntity();
safereturn_entry.character_id = CharacterID();
safereturn_entry.instance_zone_id = zone->GetZoneID();
safereturn_entry.instance_id = zone->GetInstanceID();
safereturn_entry.safe_zone_id = safereturn.zone_id;
safereturn_entry.safe_x = safereturn.x;
safereturn_entry.safe_y = safereturn.y;
safereturn_entry.safe_z = safereturn.z;
safereturn_entry.safe_heading = safereturn.heading;
CharacterInstanceSafereturnsRepository::InsertOneOrUpdate(database, safereturn_entry);
}
else
{
CharacterInstanceSafereturnsRepository::DeleteWhere(database,
fmt::format("character_id = {}", character_id));
}
/**
* DevTools Load Settings
*/
if (Admin() >= EQ::DevTools::GM_ACCOUNT_STATUS_LEVEL) {
std::string dev_tools_window_key = StringFormat("%i-dev-tools-disabled", AccountID());
if (DataBucket::GetData(dev_tools_window_key) == "true") {
dev_tools_enabled = false;
}
}
if (m_ClientVersionBit & EQ::versions::maskUFAndLater) {
outapp = new EQApplicationPacket(OP_XTargetResponse, 8);
outapp->WriteUInt32(GetMaxXTargets());
outapp->WriteUInt32(0);
FastQueuePacket(&outapp);
}
/*
Weather Packet
This shouldn't be moved; it seems to be what the client
uses to advance to the next state (sending ReqNewZone)
*/
outapp = new EQApplicationPacket(OP_Weather, 12);
Weather_Struct *ws = (Weather_Struct *)outapp->pBuffer;
ws->val1 = 0x000000FF;
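// zone_weather: 1 = rain (type 0x31); 2 is presumed to be snow, which also
// sets byte 8 of the payload (the meaning of that byte is an assumption).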
if (zone->zone_weather == 1) { ws->type = 0x31; } // Rain
if (zone->zone_weather == 2) {
outapp->pBuffer[8] = 0x01;
ws->type = 0x02;
}
outapp->priority = 6;
QueuePacket(outapp);
safe_delete(outapp);
if (ClientVersion() >= EQ::versions::ClientVersion::RoF) {
Handle_Connect_OP_ReqNewZone(nullptr);
}
SetAttackTimer();
conn_state = ZoneInfoSent;
zoneinpacket_timer.Start();
return;
}
// connected opcode handlers
void Client::Handle_0x0193(const EQApplicationPacket *app)
{
// Not sure what this opcode does. It started being sent when OP_ClientUpdate was
// changed to pump OP_ClientUpdate back out instead of OP_MobUpdate
// 2 bytes: 00 00
return;
}
void Client::Handle_OP_AAAction(const EQApplicationPacket *app)
{
LogAA("Received OP_AAAction");
if (app->size != sizeof(AA_Action)) {
LogAA("Error! OP_AAAction size didnt match!");
return;
}
AA_Action* action = (AA_Action*)app->pBuffer;
if (action->action == aaActionActivate) {//AA Hotkey
LogAA("Activating AA [{}]", action->ability);
ActivateAlternateAdvancementAbility(action->ability, action->target_id);
}
else if (action->action == aaActionBuy) {
PurchaseAlternateAdvancementRank(action->ability);
}
else if (action->action == aaActionDisableEXP) { //Turn Off AA Exp
if (m_epp.perAA > 0)
MessageString(Chat::White, AA_OFF);
m_epp.perAA = 0;
SendAlternateAdvancementStats();
}
else if (action->action == aaActionSetEXP) {
if (m_epp.perAA == 0)
MessageString(Chat::White, AA_ON);
m_epp.perAA = action->exp_value;
if (m_epp.perAA < 0 || m_epp.perAA > 100)
m_epp.perAA = 0; // stop exploit with sanity check
// send an update
SendAlternateAdvancementStats();
SendAlternateAdvancementTable();
}
else {
LogAA("Unknown AA action : [{}] [{}] [{}] [{}]", action->action, action->ability, action->target_id, action->exp_value);
}
}
void Client::Handle_OP_AcceptNewTask(const EQApplicationPacket *app)
{
if (app->size != sizeof(AcceptNewTask_Struct)) {
LogDebug("Size mismatch in OP_AcceptNewTask expected [{}] got [{}]", sizeof(AcceptNewTask_Struct), app->size);
DumpPacket(app);
return;
}
AcceptNewTask_Struct *ant = (AcceptNewTask_Struct*)app->pBuffer;
if (ant->task_id > 0 && RuleB(TaskSystem, EnableTaskSystem) && task_state)
task_state->AcceptNewTask(this, ant->task_id, ant->task_master_id, std::time(nullptr));
}
void Client::Handle_OP_AdventureInfoRequest(const EQApplicationPacket *app)
{
if (app->size < sizeof(EntityId_Struct))
{
LogError("Handle_OP_AdventureInfoRequest had a packet that was too small");
return;
}
EntityId_Struct* ent = (EntityId_Struct*)app->pBuffer;
Mob * m = entity_list.GetMob(ent->entity_id);
if (m && m->IsNPC())
{
std::map<uint32, std::string>::iterator it;
it = zone->adventure_entry_list_flavor.find(m->CastToNPC()->GetAdventureTemplate());
if (it != zone->adventure_entry_list_flavor.end())
{
auto outapp = new EQApplicationPacket(OP_AdventureInfo, (it->second.size() + 2));
strn0cpy((char*)outapp->pBuffer, it->second.c_str(), it->second.size());
FastQueuePacket(&outapp);
}
else
{
if (m->CastToNPC()->GetAdventureTemplate() != 0)
{
std::string text = "Choose your difficulty and preferred adventure type.";
auto outapp = new EQApplicationPacket(OP_AdventureInfo, (text.size() + 2));
strn0cpy((char*)outapp->pBuffer, text.c_str(), text.size());
FastQueuePacket(&outapp);
}
}
}
}
void Client::Handle_OP_AdventureLeaderboardRequest(const EQApplicationPacket *app)
{
if (app->size < sizeof(AdventureLeaderboardRequest_Struct))
{
return;
}
if (adventure_leaderboard_timer)
{
return;
}
adventure_leaderboard_timer = new Timer(4000);
auto pack = new ServerPacket(ServerOP_AdventureLeaderboard, sizeof(ServerLeaderboardRequest_Struct));
ServerLeaderboardRequest_Struct *lr = (ServerLeaderboardRequest_Struct*)pack->pBuffer;
strcpy(lr->player, GetName());
AdventureLeaderboardRequest_Struct *lrs = (AdventureLeaderboardRequest_Struct*)app->pBuffer;
lr->type = 1 + (lrs->theme * 2) + lrs->type;
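// Encoding sketch: the server-side leaderboard type interleaves theme and
// type, e.g. theme 0/type 0 -> 1, theme 0/type 1 -> 2, theme 1/type 0 -> 3.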
worldserver.SendPacket(pack);
delete pack;
}
void Client::Handle_OP_AdventureMerchantPurchase(const EQApplicationPacket *app)
{
if (app->size != sizeof(Adventure_Purchase_Struct))
{
LogError("OP size error: OP_AdventureMerchantPurchase expected:[{}] got:[{}]", sizeof(Adventure_Purchase_Struct), app->size);
return;
}
Adventure_Purchase_Struct* aps = (Adventure_Purchase_Struct*)app->pBuffer;
/*
Get item aps->itemid (can check the NPC if that's necessary); apply the LDoN point theme check only if the theme is not 0
(theme codes 1-5 correspond to the five LDoN dungeons; see the bitmask note below).
if (ldon_points_available >= item ldonpointcost)
{
give item (67 00 00 00 for the packet type using opcode 0x02c5)
ldon_points_available -= ldonpointcost;
}
*/
uint32 merchantid = 0;
Mob* tmp = entity_list.GetMob(aps->npcid);
if (tmp == 0 || !tmp->IsNPC() || ((tmp->GetClass() != ADVENTUREMERCHANT) &&
(tmp->GetClass() != DISCORD_MERCHANT) && (tmp->GetClass() != NORRATHS_KEEPERS_MERCHANT) && (tmp->GetClass() != DARK_REIGN_MERCHANT)))
return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, tmp->GetPosition()) > USE_NPC_RANGE2)
return;
merchantid = tmp->CastToNPC()->MerchantType;
const EQ::ItemData* item = nullptr;
bool found = false;
std::list<MerchantList> merlist = zone->merchanttable[merchantid];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tmp->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (!item)
continue;
if (item->ID == aps->itemid) { //This check makes sure the item is actually on the NPC; people attempt to inject packets to get items summoned...
found = true;
break;
}
}
if (!item || !found) {
Message(Chat::Red, "Error: The item you purchased does not exist!");
return;
}
if (aps->Type == LDoNMerchant)
{
if (m_pp.ldon_points_available < int32(item->LDoNPrice)) {
Message(Chat::Red, "You cannot afford that item.");
return;
}
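// LDoNTheme is a bitmask over the five LDoN dungeons, matching the checks
// below: 1 = guk, 2 = mir, 4 = mmc, 8 = ruj, 16 = tak. Values above 16 skip
// the per-theme point check, and only the highest set bit is checked.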
if (item->LDoNTheme <= 16)
{
if (item->LDoNTheme & 16)
{
if (m_pp.ldon_points_tak < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in tak to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (item->LDoNTheme & 8)
{
if (m_pp.ldon_points_ruj < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in ruj to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (item->LDoNTheme & 4)
{
if (m_pp.ldon_points_mmc < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in mmc to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (item->LDoNTheme & 2)
{
if (m_pp.ldon_points_mir < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in mir to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (item->LDoNTheme & 1)
{
if (m_pp.ldon_points_guk < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in guk to purchase this item.", int32(item->LDoNPrice));
return;
}
}
}
}
else if (aps->Type == DiscordMerchant)
{
if (GetPVPPoints() < item->LDoNPrice)
{
Message(Chat::Red, "You need at least %u PVP points to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (aps->Type == NorrathsKeepersMerchant)
{
if (GetRadiantCrystals() < item->LDoNPrice)
{
Message(Chat::Red, "You need at least %u Radiant Crystals to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (aps->Type == DarkReignMerchant)
{
if (GetEbonCrystals() < item->LDoNPrice)
{
Message(Chat::Red, "You need at least %u Ebon Crystals to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else
{
Message(Chat::Red, "Unknown Adventure Merchant type.");
return;
}
if (CheckLoreConflict(item))
{
Message(Chat::Yellow, "You can only have one of a lore item.");
return;
}
if (aps->Type == LDoNMerchant)
{
int32 requiredpts = (int32)item->LDoNPrice*-1;
if (!UpdateLDoNPoints(6, requiredpts))
return;
}
else if (aps->Type == DiscordMerchant)
{
SetPVPPoints(GetPVPPoints() - (int32)item->LDoNPrice);
SendPVPStats();
}
else if (aps->Type == NorrathsKeepersMerchant)
{
SetRadiantCrystals(GetRadiantCrystals() - (int32)item->LDoNPrice);
}
else if (aps->Type == DarkReignMerchant)
{
SetEbonCrystals(GetEbonCrystals() - (int32)item->LDoNPrice);
}
int16 charges = 1;
if (item->MaxCharges != 0)
charges = item->MaxCharges;
EQ::ItemInstance *inst = database.CreateItem(item, charges);
if (!inst) { // guard against a failed item creation before dereferencing
Message(Chat::Red, "Error: The item you purchased could not be created!");
return;
}
if (!AutoPutLootInInventory(*inst, true, true))
{
PutLootInInventory(EQ::invslot::slotCursor, *inst);
}
Save(1);
}
void Client::Handle_OP_AdventureMerchantRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(AdventureMerchant_Struct))
{
LogError("OP size error: OP_AdventureMerchantRequest expected:[{}] got:[{}]", sizeof(AdventureMerchant_Struct), app->size);
return;
}
std::stringstream ss(std::stringstream::in | std::stringstream::out);
uint8 count = 0;
AdventureMerchant_Struct* eid = (AdventureMerchant_Struct*)app->pBuffer;
uint32 merchantid = 0;
Mob* tmp = entity_list.GetMob(eid->entity_id);
if (tmp == 0 || !tmp->IsNPC() || ((tmp->GetClass() != ADVENTUREMERCHANT) &&
(tmp->GetClass() != DISCORD_MERCHANT) && (tmp->GetClass() != NORRATHS_KEEPERS_MERCHANT) && (tmp->GetClass() != DARK_REIGN_MERCHANT)))
return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, tmp->GetPosition()) > USE_NPC_RANGE2)
return;
merchantid = tmp->CastToNPC()->MerchantType;
const EQ::ItemData *item = nullptr;
std::list<MerchantList> merlist = zone->merchanttable[merchantid];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end() && count<255; ++itr) {
const MerchantList &ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tmp->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (item)
{
uint32 theme;
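// Collapse the LDoNTheme bitmask to the single theme code the client list
// expects: 0 = none/mixed, 1 = guk, 2 = mir, 3 = mmc, 4 = ruj, 5 = tak
// (highest set bit wins; values above 16 fall back to 0).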
if (item->LDoNTheme > 16)
{
theme = 0;
}
else if (item->LDoNTheme & 16)
{
theme = 5;
}
else if (item->LDoNTheme & 8)
{
theme = 4;
}
else if (item->LDoNTheme & 4)
{
theme = 3;
}
else if (item->LDoNTheme & 2)
{
theme = 2;
}
else if (item->LDoNTheme & 1)
{
theme = 1;
}
else
{
theme = 0;
}
ss << "^" << item->Name << "|";
ss << item->ID << "|";
ss << item->LDoNPrice << "|";
ss << theme << "|";
ss << (item->Stackable ? 1 : 0) << "|";
ss << (item->LoreFlag ? 1 : 0) << "|";
ss << item->Races << "|";
ss << item->Classes;
count++;
}
}
// Response format: first byte is the record count, then one record per item:
// ^Name|ID|Cost in points|Theme (0 = none)|Stackable (0/1)|Lore (0/1)|races bitmap|classes bitmap
EQApplicationPacket* outapp = new EQApplicationPacket(OP_AdventureMerchantResponse, ss.str().size() + 2);
outapp->pBuffer[0] = count;
strn0cpy((char*)&outapp->pBuffer[1], ss.str().c_str(), ss.str().size());
FastQueuePacket(&outapp);
}
void Client::Handle_OP_AdventureMerchantSell(const EQApplicationPacket *app)
{
if (app->size != sizeof(Adventure_Sell_Struct))
{
LogDebug("Size mismatch on OP_AdventureMerchantSell: got [{}] expected [{}]", app->size, sizeof(Adventure_Sell_Struct));
DumpPacket(app);
return;
}
Adventure_Sell_Struct *ams_in = (Adventure_Sell_Struct*)app->pBuffer;
Mob* vendor = entity_list.GetMob(ams_in->npcid);
if (vendor == 0 || !vendor->IsNPC() || ((vendor->GetClass() != ADVENTUREMERCHANT) &&
(vendor->GetClass() != NORRATHS_KEEPERS_MERCHANT) && (vendor->GetClass() != DARK_REIGN_MERCHANT)))
{
Message(Chat::Red, "Vendor was not found.");
return;
}
if (DistanceSquared(m_Position, vendor->GetPosition()) > USE_NPC_RANGE2)
{
Message(Chat::Red, "Vendor is out of range.");
return;
}
uint32 itemid = GetItemIDAt(ams_in->slot);
if (itemid == 0)
{
Message(Chat::Red, "Found no item at that slot.");
return;
}
const EQ::ItemData* item = database.GetItem(itemid);
EQ::ItemInstance* inst = GetInv().GetItem(ams_in->slot);
if (!item || !inst) {
Message(Chat::Red, "You seemed to have misplaced that item...");
return;
}
// Note that Lucy has ldonsold values of 4 and 5 for items sold by Norrath's Keepers and Dark Reign, whereas 13th Floor
// has ldonsold = 0 for these items, so some manual editing of the items DB will be required to support sell back of the
// items.
//
// The Merchant seems to have some other way of knowing whether he will accept the item, other than the ldonsold field,
// e.g. if you summon items 76036 and 76053 (good and evil versions of Spell: Ward Of Vengeance), if you are interacting
// with a Norrath's Keeper merchant and click on 76036 in your inventory, he says he will give you radiant crystals for
// it, but he will refuse for item 76053.
//
// Similarly, just giving a cloth cap an ldonsold value of 4 will not make the Merchant buy it.
//
// Note that the Client will not allow you to sell anything back to a Discord merchant, so there is no need to handle
// that case here.
if (item->LDoNSold == 0)
{
Message(Chat::Red, "The merchant does not want that item.");
return;
}
if (item->LDoNPrice == 0)
{
Message(Chat::Red, "The merchant does not want that item.");
return;
}
// 06/11/2016 This formula matches RoF2 client side calculation.
int32 price = (item->LDoNPrice + 1) * item->LDoNSellBackRate / 100;
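// e.g. with a hypothetical LDoNPrice of 100 and LDoNSellBackRate of 70: (100 + 1) * 70 / 100 = 70 points (integer math)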
if (price == 0)
{
Message(Chat::Red, "The merchant does not want that item.");
return;
}
if (RuleB(EventLog, RecordSellToMerchant))
LogMerchant(this, vendor, ams_in->charges, price, item, false);
if (!inst->IsStackable())
{
DeleteItemInInventory(ams_in->slot, 0, false);
}
else
{
if (inst->GetCharges() < ams_in->charges)
{
ams_in->charges = inst->GetCharges();
}
if (ams_in->charges == 0)
{
Message(Chat::Red, "Charge mismatch error.");
return;
}
DeleteItemInInventory(ams_in->slot, ams_in->charges, false);
price *= ams_in->charges;
}
auto outapp = new EQApplicationPacket(OP_AdventureMerchantSell, sizeof(Adventure_Sell_Struct));
Adventure_Sell_Struct *ams = (Adventure_Sell_Struct*)outapp->pBuffer;
ams->slot = ams_in->slot;
ams->unknown000 = 1;
ams->npcid = ams_in->npcid;
ams->charges = ams_in->charges;
ams->sell_price = price;
FastQueuePacket(&outapp);
switch (vendor->GetClass())
{
case ADVENTUREMERCHANT:
{
UpdateLDoNPoints(6, price);
break;
}
case NORRATHS_KEEPERS_MERCHANT:
{
SetRadiantCrystals(GetRadiantCrystals() + price);
break;
}
case DARK_REIGN_MERCHANT:
{
SetEbonCrystals(GetEbonCrystals() + price);
break;
}
default:
break;
}
Save(1);
}
void Client::Handle_OP_AdventureRequest(const EQApplicationPacket *app)
{
if (app->size < sizeof(AdventureRequest_Struct))
{
LogError("Handle_OP_AdventureRequest had a packet that was too small");
return;
}
if (IsOnAdventure())
{
return;
}
if (!p_timers.Expired(&database, pTimerStartAdventureTimer, false))
{
return;
}
if (GetPendingAdventureRequest())
{
return;
}
AdventureRequest_Struct* ars = (AdventureRequest_Struct*)app->pBuffer;
uint8 group_members = 0;
Raid *r = nullptr;
Group *g = nullptr;
if (IsRaidGrouped())
{
r = GetRaid();
group_members = r->RaidCount();
}
else if (IsGrouped())
{
g = GetGroup();
group_members = g->GroupCount();
}
else
{
return;
}
if (group_members < RuleI(Adventure, MinNumberForGroup) || group_members > RuleI(Adventure, MaxNumberForGroup))
{
return;
}
Mob* m = entity_list.GetMob(ars->entity_id);
uint32 template_id = 0;
if (m && m->IsNPC())
{
template_id = m->CastToNPC()->GetAdventureTemplate();
}
else
{
return;
}
auto packet =
new ServerPacket(ServerOP_AdventureRequest, sizeof(ServerAdventureRequest_Struct) + (64 * group_members));
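// each member name occupies a fixed 64-byte slot appended after the base struct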
ServerAdventureRequest_Struct *sar = (ServerAdventureRequest_Struct*)packet->pBuffer;
sar->member_count = group_members;
sar->risk = ars->risk;
sar->type = ars->type;
sar->template_id = template_id;
strcpy(sar->leader, GetName());
if (IsRaidGrouped())
{
int i = 0;
for (int x = 0; x < 72; ++x)
{
if (i == group_members)
{
break;
}
const char *c_name = nullptr;
c_name = r->GetClientNameByIndex(x);
if (c_name)
{
memcpy((packet->pBuffer + sizeof(ServerAdventureRequest_Struct) + (64 * i)), c_name, strlen(c_name));
++i;
}
}
}
else
{
int i = 0;
for (int x = 0; x < 6; ++x)
{
if (i == group_members)
{
break;
}
const char *c_name = nullptr;
c_name = g->GetClientNameByIndex(x);
if (c_name)
{
memcpy((packet->pBuffer + sizeof(ServerAdventureRequest_Struct) + (64 * i)), c_name, strlen(c_name));
++i;
}
}
}
worldserver.SendPacket(packet);
delete packet;
p_timers.Start(pTimerStartAdventureTimer, 5);
}
void Client::Handle_OP_AdventureStatsRequest(const EQApplicationPacket *app)
{
if (adventure_stats_timer)
{
return;
}
adventure_stats_timer = new Timer(8000);
auto outapp = new EQApplicationPacket(OP_AdventureStatsReply, sizeof(AdventureStats_Struct));
AdventureStats_Struct *as = (AdventureStats_Struct*)outapp->pBuffer;
if (database.GetAdventureStats(CharacterID(), as))
{
m_pp.ldon_wins_guk = as->success.guk;
m_pp.ldon_wins_mir = as->success.mir;
m_pp.ldon_wins_mmc = as->success.mmc;
m_pp.ldon_wins_ruj = as->success.ruj;
m_pp.ldon_wins_tak = as->success.tak;
m_pp.ldon_losses_guk = as->failure.guk;
m_pp.ldon_losses_mir = as->failure.mir;
m_pp.ldon_losses_mmc = as->failure.mmc;
m_pp.ldon_losses_ruj = as->failure.ruj;
m_pp.ldon_losses_tak = as->failure.tak;
}
FastQueuePacket(&outapp);
}
void Client::Handle_OP_AggroMeterLockTarget(const EQApplicationPacket *app)
{
if (app->size < sizeof(uint32)) {
LogError("Handle_OP_AggroMeterLockTarget had a packet that was too small");
return;
}
SetAggroMeterLock(app->ReadUInt32(0));
ProcessAggroMeter();
}
void Client::Handle_OP_AltCurrencyMerchantRequest(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencyMerchantRequest, app, uint32);
NPC* tar = entity_list.GetNPCByID(*((uint32*)app->pBuffer));
if (tar) {
if (DistanceSquared(m_Position, tar->GetPosition()) > USE_NPC_RANGE2)
return;
if (tar->GetClass() != ALT_CURRENCY_MERCHANT) {
return;
}
uint32 alt_cur_id = tar->GetAltCurrencyType();
if (alt_cur_id == 0) {
return;
}
auto altc_iter = zone->AlternateCurrencies.begin();
bool found = false;
while (altc_iter != zone->AlternateCurrencies.end()) {
if ((*altc_iter).id == alt_cur_id) {
found = true;
break;
}
++altc_iter;
}
if (!found) {
return;
}
std::stringstream ss(std::stringstream::in | std::stringstream::out);
std::stringstream item_ss(std::stringstream::in | std::stringstream::out);
ss << alt_cur_id << "|1|" << alt_cur_id;
uint32 count = 0;
uint32 merchant_id = tar->MerchantType;
const EQ::ItemData *item = nullptr;
std::list<MerchantList> merlist = zone->merchanttable[merchant_id];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end() && count < 255; ++itr) {
const MerchantList &ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tar->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (item)
{
item_ss << "^" << item->Name << "|";
item_ss << item->ID << "|";
item_ss << ml.alt_currency_cost << "|";
item_ss << "0|";
item_ss << "1|";
item_ss << item->Races << "|";
item_ss << item->Classes;
count++;
}
}
if (count > 0) {
ss << "|" << count << item_ss.str();
}
else {
ss << "|0";
}
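// Reply format: "<currency_id>|1|<currency_id>|<count>" followed by one "^Name|ID|Cost|0|1|Races|Classes" entry per item,
// e.g. (hypothetical values): "42|1|42|1^Shiny Token|1234|10|0|1|65535|65535"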
EQApplicationPacket* outapp = new EQApplicationPacket(OP_AltCurrencyMerchantReply, ss.str().length() + 1);
memcpy(outapp->pBuffer, ss.str().c_str(), ss.str().length());
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_AltCurrencyPurchase(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencyPurchase, app, AltCurrencyPurchaseItem_Struct);
AltCurrencyPurchaseItem_Struct *purchase = (AltCurrencyPurchaseItem_Struct*)app->pBuffer;
NPC* tar = entity_list.GetNPCByID(purchase->merchant_entity_id);
if (tar) {
if (DistanceSquared(m_Position, tar->GetPosition())> USE_NPC_RANGE2)
return;
if (tar->GetClass() != ALT_CURRENCY_MERCHANT) {
return;
}
uint32 alt_cur_id = tar->GetAltCurrencyType();
if (alt_cur_id == 0) {
return;
}
const EQ::ItemData* item = nullptr;
uint32 cost = 0;
uint32 current_currency = GetAlternateCurrencyValue(alt_cur_id);
uint32 merchant_id = tar->MerchantType;
bool found = false;
std::list<MerchantList> merlist = zone->merchanttable[merchant_id];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tar->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (!item)
continue;
if (item->ID == purchase->item_id) { // Verify the item is actually sold by this NPC; players attempt to inject packets to get arbitrary items summoned
cost = ml.alt_currency_cost;
found = true;
break;
}
}
if (!item || !found) {
Message(Chat::Red, "Error: The item you purchased does not exist!");
return;
}
if (cost > current_currency) {
Message(Chat::Red, "You cannot afford that item right now.");
return;
}
if (CheckLoreConflict(item))
{
Message(Chat::Yellow, "You can only have one of a lore item.");
return;
}
/* QS: PlayerLogAlternateCurrencyTransactions :: Merchant Purchase */
if (RuleB(QueryServ, PlayerLogAlternateCurrencyTransactions)) {
std::string event_desc = StringFormat("Merchant Purchase :: Spent alt_currency_id:%i cost:%i for itemid:%i in zoneid:%i instid:%i", alt_cur_id, cost, item->ID, this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Alternate_Currency_Transactions, this->CharacterID(), event_desc);
}
AddAlternateCurrencyValue(alt_cur_id, -((int32)cost));
int16 charges = 1;
if (item->MaxCharges != 0)
charges = item->MaxCharges;
EQ::ItemInstance *inst = database.CreateItem(item, charges);
if (inst)
{
if (!AutoPutLootInInventory(*inst, true, true))
{
PutLootInInventory(EQ::invslot::slotCursor, *inst);
}
// the inventory routines copy the instance, so free the temporary to avoid a leak
safe_delete(inst);
}
Save(1);
}
}
void Client::Handle_OP_AltCurrencyReclaim(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencyReclaim, app, AltCurrencyReclaim_Struct);
AltCurrencyReclaim_Struct *reclaim = (AltCurrencyReclaim_Struct*)app->pBuffer;
uint32 item_id = 0;
auto iter = zone->AlternateCurrencies.begin();
while (iter != zone->AlternateCurrencies.end()) {
if ((*iter).id == reclaim->currency_id) {
item_id = (*iter).item_id;
}
++iter;
}
if (item_id == 0) {
return;
}
/* Item to Currency Storage */
if (reclaim->reclaim_flag == 1) {
uint32 removed = NukeItem(item_id, invWhereWorn | invWherePersonal | invWhereCursor);
if (removed > 0) {
AddAlternateCurrencyValue(reclaim->currency_id, removed);
/* QS: PlayerLogAlternateCurrencyTransactions :: Item to Currency */
if (RuleB(QueryServ, PlayerLogAlternateCurrencyTransactions)) {
std::string event_desc = StringFormat("Reclaim :: Item to Currency :: alt_currency_id:%i amount:%i to currency tab in zoneid:%i instid:%i", reclaim->currency_id, removed, this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Alternate_Currency_Transactions, this->CharacterID(), event_desc);
}
}
}
/* Cursor to Item storage */
else {
uint32 max_currency = GetAlternateCurrencyValue(reclaim->currency_id);
if (max_currency == 0 || reclaim->count == 0)
return;
/* If you input more than you have currency wise, just give the max of the currency you currently have */
if (reclaim->count > max_currency) {
SummonItem(item_id, max_currency);
SetAlternateCurrencyValue(reclaim->currency_id, 0);
}
else {
SummonItem(item_id, reclaim->count, 0, 0, 0, 0, 0, 0, false, EQ::invslot::slotCursor);
AddAlternateCurrencyValue(reclaim->currency_id, -((int32)reclaim->count));
}
/* QS: PlayerLogAlternateCurrencyTransactions :: Cursor to Item Storage */
if (RuleB(QueryServ, PlayerLogAlternateCurrencyTransactions)) {
std::string event_desc = StringFormat("Reclaim :: Cursor to Item :: alt_currency_id:%i amount:-%i in zoneid:%i instid:%i", reclaim->currency_id, reclaim->count, this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Alternate_Currency_Transactions, this->CharacterID(), event_desc);
}
}
}
void Client::Handle_OP_AltCurrencySell(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencySell, app, AltCurrencySellItem_Struct);
EQApplicationPacket *outapp = app->Copy();
AltCurrencySellItem_Struct *sell = (AltCurrencySellItem_Struct*)outapp->pBuffer;
NPC* tar = entity_list.GetNPCByID(sell->merchant_entity_id);
if (tar) {
if (DistanceSquared(m_Position, tar->GetPosition()) > USE_NPC_RANGE2)
return;
if (tar->GetClass() != ALT_CURRENCY_MERCHANT) {
return;
}
uint32 alt_cur_id = tar->GetAltCurrencyType();
if (alt_cur_id == 0) {
return;
}
EQ::ItemInstance* inst = GetInv().GetItem(sell->slot_id);
if (!inst) {
return;
}
if (!RuleB(Merchant, EnableAltCurrencySell)) {
return;
}
const EQ::ItemData* item = nullptr;
uint32 cost = 0;
uint32 current_currency = GetAlternateCurrencyValue(alt_cur_id);
uint32 merchant_id = tar->MerchantType;
uint32 npc_id = tar->GetNPCTypeID();
bool found = false;
std::list<MerchantList> merlist = zone->merchanttable[merchant_id];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tar->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (!item)
continue;
if (item->ID == inst->GetItem()->ID) {
cost = ml.alt_currency_cost;
found = true;
break;
}
}
if (!found) {
return;
}
if (!inst->IsStackable())
{
DeleteItemInInventory(sell->slot_id, 0, false);
}
else
{
if (inst->GetCharges() < sell->charges)
{
sell->charges = inst->GetCharges();
}
if (sell->charges == 0)
{
Message(Chat::Red, "Charge mismatch error.");
return;
}
DeleteItemInInventory(sell->slot_id, sell->charges, false);
cost *= sell->charges;
}
sell->cost = cost;
/* QS: PlayerLogAlternateCurrencyTransactions :: Sold to Merchant*/
if (RuleB(QueryServ, PlayerLogAlternateCurrencyTransactions)) {
std::string event_desc = StringFormat("Sold to Merchant :: itemid:%u npcid:%u alt_currency_id:%u cost:%u in zoneid:%u instid:%i", item->ID, npc_id, alt_cur_id, cost, this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Alternate_Currency_Transactions, this->CharacterID(), event_desc);
}
FastQueuePacket(&outapp);
AddAlternateCurrencyValue(alt_cur_id, cost);
Save(1);
}
}
void Client::Handle_OP_AltCurrencySellSelection(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencySellSelection, app, AltCurrencySelectItem_Struct);
AltCurrencySelectItem_Struct *select = (AltCurrencySelectItem_Struct*)app->pBuffer;
NPC* tar = entity_list.GetNPCByID(select->merchant_entity_id);
if (tar) {
if (DistanceSquared(m_Position, tar->GetPosition()) > USE_NPC_RANGE2)
return;
if (tar->GetClass() != ALT_CURRENCY_MERCHANT) {
return;
}
uint32 alt_cur_id = tar->GetAltCurrencyType();
if (alt_cur_id == 0) {
return;
}
EQ::ItemInstance *inst = m_inv.GetItem(select->slot_id);
if (!inst) {
return;
}
const EQ::ItemData* item = nullptr;
uint32 cost = 0;
uint32 current_currency = GetAlternateCurrencyValue(alt_cur_id);
uint32 merchant_id = tar->MerchantType;
if (RuleB(Merchant, EnableAltCurrencySell)) {
bool found = false;
std::list<MerchantList> merlist = zone->merchanttable[merchant_id];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tar->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (!item)
continue;
if (item->ID == inst->GetItem()->ID) {
cost = ml.alt_currency_cost;
found = true;
break;
}
}
if (!found) {
cost = 0;
}
}
else {
cost = 0;
}
auto outapp =
new EQApplicationPacket(OP_AltCurrencySellSelection, sizeof(AltCurrencySelectItemReply_Struct));
AltCurrencySelectItemReply_Struct *reply = (AltCurrencySelectItemReply_Struct*)outapp->pBuffer;
reply->unknown004 = 0xFF;
reply->unknown005 = 0xFF;
reply->unknown006 = 0xFF;
reply->unknown007 = 0xFF;
strcpy(reply->item_name, inst->GetItem()->Name);
reply->cost = cost;
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_Animation(const EQApplicationPacket *app)
{
if (app->size != sizeof(Animation_Struct)) {
LogError("Received invalid sized OP_Animation: got [{}], expected [{}]", app->size, sizeof(Animation_Struct));
DumpPacket(app);
return;
}
Animation_Struct *s = (Animation_Struct *)app->pBuffer;
//might verify spawn ID, but it wouldn't affect anything
DoAnim(s->action, s->speed);
return;
}
void Client::Handle_OP_ApplyPoison(const EQApplicationPacket *app)
{
if (app->size != sizeof(ApplyPoison_Struct)) {
LogError("Wrong size: OP_ApplyPoison, size=[{}], expected [{}]", app->size, sizeof(ApplyPoison_Struct));
DumpPacket(app);
return;
}
ApplyPoison_Struct* ApplyPoisonData = (ApplyPoison_Struct*)app->pBuffer;
uint32 ApplyPoisonSuccessResult = 0;
const EQ::ItemInstance* PoisonItemInstance = GetInv().GetItem(ApplyPoisonData->inventorySlot);
const EQ::ItemData* poison = (PoisonItemInstance ? PoisonItemInstance->GetItem() : nullptr);
bool IsPoison = (poison && poison->ItemType == EQ::item::ItemTypePoison);
if (IsPoison && GetClass() == ROGUE) {
// Live always checks for skillup, even when poison is too high
CheckIncreaseSkill(EQ::skills::SkillApplyPoison, nullptr, 10);
if (poison->Proc.Level2 > GetLevel()) {
// Poison is too high to apply.
MessageString(Chat::LightBlue, POISON_TOO_HIGH);
}
else {
double ChanceRoll = zone->random.Real(0, 1);
// Poisons that use this skill (old world poisons) almost
// never fail to apply. I did 25 applies of a trivial 120+
// poison with an apply skill of 48 and they all worked.
// Also did 25 straight poisons at apply skill 248 for very
// high end and they never failed.
// Applying poisons ranging from 1-9, 28/30 worked for a level 18.
// Poisons that don't proc until a level higher than the
// rogue simply won't apply at all, no skill check done.
uint16 poison_skill = GetSkill(EQ::skills::SkillApplyPoison);
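// e.g. at skill 200 the success chance below works out to 0.75 + 200 / 1000.0 = 0.95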
if (ChanceRoll < (.75 + poison_skill / 1000.0)) {
ApplyPoisonSuccessResult = 1;
AddProcToWeapon(poison->Proc.Effect, false, (GetDEX() / 100) + 103, POISON_PROC);
}
}
// Live always deletes the item, success or failure. Even if too high.
DeleteItemInInventory(ApplyPoisonData->inventorySlot, 1, true);
}
auto outapp = new EQApplicationPacket(OP_ApplyPoison, nullptr, sizeof(ApplyPoison_Struct));
ApplyPoison_Struct* ApplyPoisonResult = (ApplyPoison_Struct*)outapp->pBuffer;
ApplyPoisonResult->success = ApplyPoisonSuccessResult;
ApplyPoisonResult->inventorySlot = ApplyPoisonData->inventorySlot;
FastQueuePacket(&outapp);
}
void Client::Handle_OP_Assist(const EQApplicationPacket *app)
{
if (app->size != sizeof(EntityId_Struct)) {
LogDebug("Size mismatch in OP_Assist expected [{}] got [{}]", sizeof(EntityId_Struct), app->size);
return;
}
EntityId_Struct* eid = (EntityId_Struct*)app->pBuffer;
Entity* entity = entity_list.GetID(eid->entity_id);
EQApplicationPacket* outapp = app->Copy();
eid = (EntityId_Struct*)outapp->pBuffer;
if (RuleB(Combat, AssistNoTargetSelf))
eid->entity_id = GetID();
if (entity && entity->IsMob()) {
Mob *assistee = entity->CastToMob();
if (assistee->GetTarget()) {
Mob *new_target = assistee->GetTarget();
if (new_target && (GetGM() ||
Distance(m_Position, assistee->GetPosition()) <= TARGETING_RANGE)) {
cheat_manager.SetExemptStatus(Assist, true);
eid->entity_id = new_target->GetID();
} else {
eid->entity_id = 0;
}
} else {
eid->entity_id = 0;
}
}
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_AssistGroup(const EQApplicationPacket *app)
{
if (app->size != sizeof(EntityId_Struct)) {
LogDebug("Size mismatch in OP_AssistGroup expected [{}] got [{}]", sizeof(EntityId_Struct), app->size);
return;
}
QueuePacket(app);
return;
}
void Client::Handle_OP_AugmentInfo(const EQApplicationPacket *app)
{
// This packet is sent by the client when an Augment item information window is opened.
// On some clients this seems to nuke the charm text (ex. Adventurer's Stone)
if (app->size != sizeof(AugmentInfo_Struct)) {
LogDebug("Size mismatch in OP_AugmentInfo expected [{}] got [{}]", sizeof(AugmentInfo_Struct), app->size);
DumpPacket(app);
return;
}
AugmentInfo_Struct* AugInfo = (AugmentInfo_Struct*)app->pBuffer;
const EQ::ItemData * item = database.GetItem(AugInfo->itemid);
if (item) {
strn0cpy(AugInfo->augment_info, item->Name, 64);
AugInfo->itemid = 0;
QueuePacket(app);
}
}
void Client::Handle_OP_AugmentItem(const EQApplicationPacket *app)
{
if (app->size != sizeof(AugmentItem_Struct)) {
LogError("Invalid size for AugmentItem_Struct: Expected: [{}], Got: [{}]",
sizeof(AugmentItem_Struct), app->size);
return;
}
AugmentItem_Struct* in_augment = (AugmentItem_Struct*)app->pBuffer;
bool deleteItems = false;
if (ClientVersion() >= EQ::versions::ClientVersion::RoF)
{
if ((in_augment->container_slot < EQ::invslot::EQUIPMENT_BEGIN || in_augment->container_slot > EQ::invslot::GENERAL_END) &&
(in_augment->container_slot < EQ::invbag::GENERAL_BAGS_BEGIN || in_augment->container_slot > EQ::invbag::GENERAL_BAGS_END))
{
Message(Chat::Red, "The server does not allow augmentation actions from this slot.");
auto cursor_item = m_inv[EQ::invslot::slotCursor];
auto augmented_item = m_inv[in_augment->container_slot];
SendItemPacket(EQ::invslot::slotCursor, cursor_item, ItemPacketCharInventory);
// this may crash clients on certain slots
SendItemPacket(in_augment->container_slot, augmented_item, ItemPacketCharInventory);
return;
}
EQ::ItemInstance *itemOneToPush = nullptr, *itemTwoToPush = nullptr;
//Log(Logs::DebugLevel::Moderate, Logs::Debug, "cslot: [{}] aslot: [{}] cidx: [{}] aidx: [{}] act: [{}] dest: [{}]",
// in_augment->container_slot, in_augment->augment_slot, in_augment->container_index, in_augment->augment_index, in_augment->augment_action, in_augment->dest_inst_id);
EQ::ItemInstance *tobe_auged = nullptr, *old_aug = nullptr, *new_aug = nullptr, *aug = nullptr, *solvent = nullptr;
EQ::InventoryProfile& user_inv = GetInv();
uint16 item_slot = in_augment->container_slot;
uint16 solvent_slot = in_augment->augment_slot;
uint8 mat = EQ::InventoryProfile::CalcMaterialFromSlot(item_slot); // for when player is augging a piece of equipment while they're wearing it
if (item_slot == INVALID_INDEX || solvent_slot == INVALID_INDEX)
{
Message(Chat::Red, "Error: Invalid Aug Index.");
return;
}
tobe_auged = user_inv.GetItem(item_slot);
solvent = user_inv.GetItem(solvent_slot);
if (!tobe_auged)
{
Message(Chat::Red, "Error: Invalid item passed for augmenting.");
return;
}
if ((in_augment->augment_action == 1) || (in_augment->augment_action == 2))
{
// Check for valid distiller if safely removing / swapping an augmentation
if (!solvent)
{
old_aug = tobe_auged->GetAugment(in_augment->augment_index);
if (!old_aug || old_aug->GetItem()->AugDistiller != 0) {
LogError("Player tried to safely remove an augment without a distiller");
Message(Chat::Red, "Error: Missing an augmentation distiller for safely removing this augment.");
return;
}
}
else if (solvent->GetItem()->ItemType == EQ::item::ItemTypeAugmentationDistiller)
{
old_aug = tobe_auged->GetAugment(in_augment->augment_index);
if (!old_aug)
{
LogError("Player tried to safely remove a nonexistent augment");
Message(Chat::Red, "Error: No augment found in slot %i for safely removing.", in_augment->augment_index);
return;
}
else if (solvent->GetItem()->ID != old_aug->GetItem()->AugDistiller)
{
LogError("Player tried to safely remove an augment with the wrong distiller (item [{}] vs expected [{}])", solvent->GetItem()->ID, old_aug->GetItem()->AugDistiller);
Message(Chat::Red, "Error: Wrong augmentation distiller for safely removing this augment.");
return;
}
}
else if (solvent->GetItem()->ItemType != EQ::item::ItemTypePerfectedAugmentationDistiller)
{
LogError("Player tried to safely remove an augment with a non-distiller item");
Message(Chat::Red, "Error: Invalid augmentation distiller for safely removing this augment.");
return;
}
}
switch (in_augment->augment_action)
{
case 0: // Adding an augment
case 2: // Swapping augment
new_aug = user_inv.GetItem(EQ::invslot::slotCursor);
if (!new_aug) // Shouldn't get the OP code without the augment on the user's cursor, but maybe it's h4x.
{
LogError("AugmentItem OpCode with 'Insert' or 'Swap' action received, but no augment on client's cursor");
Message(Chat::Red, "Error: No augment found on cursor for inserting.");
return;
}
else
{
if (((tobe_auged->IsAugmentSlotAvailable(new_aug->GetAugmentType(), in_augment->augment_index)) != -1) &&
(tobe_auged->AvailableWearSlot(new_aug->GetItem()->Slots)))
{
old_aug = tobe_auged->RemoveAugment(in_augment->augment_index);
if (old_aug)
{
// An old augment was removed in order to be replaced with the new one (augment_action 2)
CalcBonuses();
std::vector<EQ::Any> args;
args.push_back(old_aug);
parse->EventItem(EVENT_UNAUGMENT_ITEM, this, tobe_auged, nullptr, "", in_augment->augment_index, &args);
args.assign(1, tobe_auged);
args.push_back(false);
parse->EventItem(EVENT_AUGMENT_REMOVE, this, old_aug, nullptr, "", in_augment->augment_index, &args);
}
tobe_auged->PutAugment(in_augment->augment_index, *new_aug);
tobe_auged->UpdateOrnamentationInfo();
aug = tobe_auged->GetAugment(in_augment->augment_index);
if (aug)
{
std::vector<EQ::Any> args;
args.push_back(aug);
parse->EventItem(EVENT_AUGMENT_ITEM, this, tobe_auged, nullptr, "", in_augment->augment_index, &args);
args.assign(1, tobe_auged);
parse->EventItem(EVENT_AUGMENT_INSERT, this, aug, nullptr, "", in_augment->augment_index, &args);
}
else
{
Message(Chat::Red, "Error: Could not properly insert augmentation into augment slot %i. Aborting.", in_augment->augment_index);
return;
}
itemOneToPush = tobe_auged->Clone();
if (old_aug)
{
itemTwoToPush = old_aug->Clone();
}
// Must push items after the items in inventory are deleted - necessary due to lore items...
if (itemOneToPush)
{
DeleteItemInInventory(item_slot, 0, true);
DeleteItemInInventory(EQ::invslot::slotCursor, new_aug->IsStackable() ? 1 : 0, true);
if (solvent)
{
// Consume the augment distiller
DeleteItemInInventory(solvent_slot, solvent->IsStackable() ? 1 : 0, true);
}
if (itemTwoToPush)
{
// This is a swap. Return the old aug to the player's cursor.
if (!PutItemInInventory(EQ::invslot::slotCursor, *itemTwoToPush, true))
{
LogError("Problem returning old augment to player's cursor after augmentation swap");
Message(Chat::Yellow, "Error: Failed to retrieve old augment after augmentation swap!");
}
}
if (PutItemInInventory(item_slot, *itemOneToPush, true))
{
// Successfully added an augment to the item
CalcBonuses();
if (mat != EQ::textures::materialInvalid)
{
SendWearChange(mat); // Visible item augged while equipped. Send WC in case ornamentation changed.
}
}
else
{
Message(Chat::Red, "Error: No available slot for end result. Please free up the augment slot.");
}
}
else
{
Message(Chat::Red, "Error in cloning item for augment. Aborted.");
}
}
else
{
Message(Chat::Red, "Error: No available slot for augment in that item.");
}
}
break;
case 1: // Removing augment safely (distiller)
aug = tobe_auged->GetAugment(in_augment->augment_index);
if (aug)
{
std::vector<EQ::Any> args;
args.push_back(aug);
parse->EventItem(EVENT_UNAUGMENT_ITEM, this, tobe_auged, nullptr, "", in_augment->augment_index, &args);
args.assign(1, tobe_auged);
args.push_back(false);
parse->EventItem(EVENT_AUGMENT_REMOVE, this, aug, nullptr, "", in_augment->augment_index, &args);
}
else
{
Message(Chat::Red, "Error: Could not find augmentation to remove at index %i. Aborting.", in_augment->augment_index);
return;
}
old_aug = tobe_auged->RemoveAugment(in_augment->augment_index);
tobe_auged->UpdateOrnamentationInfo();
itemOneToPush = tobe_auged->Clone();
if (old_aug)
itemTwoToPush = old_aug->Clone();
if (itemOneToPush && itemTwoToPush)
{
// Consume the augment distiller
if (solvent)
DeleteItemInInventory(solvent_slot, solvent->IsStackable() ? 1 : 0, true);
// Remove the augmented item
DeleteItemInInventory(item_slot, 0, true);
// Replace it with the unaugmented item
if (!PutItemInInventory(item_slot, *itemOneToPush, true))
{
LogError("Problem returning equipment item to player's inventory after safe augment removal");
Message(Chat::Yellow, "Error: Failed to return item after de-augmentation!");
}
CalcBonuses();
if (mat != EQ::textures::materialInvalid)
{
SendWearChange(mat); // Visible item augged while equipped. Send WC in case ornamentation changed.
}
// Drop the removed augment on the player's cursor
if (!PutItemInInventory(EQ::invslot::slotCursor, *itemTwoToPush, true))
{
LogError("Problem returning augment to player's cursor after safe removal");
Message(Chat::Yellow, "Error: Failed to return augment after removal from item!");
return;
}
}
break;
case 3: // Destroying augment (formerly done in birdbath/sealer with a solvent)
// RoF client does not require an augmentation solvent for destroying an augmentation in an item.
// Augments can be destroyed with a right click -> Destroy at any time.
aug = tobe_auged->GetAugment(in_augment->augment_index);
if (aug)
{
std::vector<EQ::Any> args;
args.push_back(aug);
parse->EventItem(EVENT_UNAUGMENT_ITEM, this, tobe_auged, nullptr, "", in_augment->augment_index, &args);
args.assign(1, tobe_auged);
args.push_back(true);
parse->EventItem(EVENT_AUGMENT_REMOVE, this, aug, nullptr, "", in_augment->augment_index, &args);
}
else
{
Message(Chat::Red, "Error: Could not find augmentation to remove at index %i. Aborting.");
return;
}
tobe_auged->DeleteAugment(in_augment->augment_index);
tobe_auged->UpdateOrnamentationInfo();
itemOneToPush = tobe_auged->Clone();
if (itemOneToPush)
{
DeleteItemInInventory(item_slot, 0, true);
if (!PutItemInInventory(item_slot, *itemOneToPush, true))
{
LogError("Problem returning equipment item to player's inventory after augment deletion");
Message(Chat::Yellow, "Error: Failed to return item after destroying augment!");
}
}
CalcBonuses();
if (mat != EQ::textures::materialInvalid)
{
SendWearChange(mat);
}
break;
default: // Unknown
LogInventory("Unrecognized augmentation action - cslot: [{}] aslot: [{}] cidx: [{}] aidx: [{}] act: [{}] dest: [{}]",
in_augment->container_slot, in_augment->augment_slot, in_augment->container_index, in_augment->augment_index, in_augment->augment_action, in_augment->dest_inst_id);
break;
}
}
else
{
// Delegate to tradeskill object to perform combine
Object::HandleAugmentation(this, in_augment, m_tradeskill_object);
}
return;
}
void Client::Handle_OP_AutoAttack(const EQApplicationPacket *app)
{
if (app->size != 4) {
LogError("OP size error: OP_AutoAttack expected:4 got:[{}]", app->size);
return;
}
if (app->pBuffer[0] == 0) {
auto_attack = false;
if (IsAIControlled()) {
return;
}
attack_timer.Disable();
ranged_timer.Disable();
attack_dw_timer.Disable();
m_AutoAttackPosition = glm::vec4();
m_AutoAttackTargetLocation = glm::vec3();
aa_los_them_mob = nullptr;
}
else if (app->pBuffer[0] == 1) {
auto_attack = true;
auto_fire = false;
if (IsAIControlled()) {
return;
}
SetAttackTimer();
if (GetTarget()) {
aa_los_them_mob = GetTarget();
m_AutoAttackPosition = GetPosition();
m_AutoAttackTargetLocation = glm::vec3(aa_los_them_mob->GetPosition());
los_status = CheckLosFN(aa_los_them_mob);
los_status_facing = IsFacingMob(aa_los_them_mob);
}
else {
m_AutoAttackPosition = GetPosition();
m_AutoAttackTargetLocation = glm::vec3();
aa_los_them_mob = nullptr;
los_status = false;
los_status_facing = false;
}
}
}
void Client::Handle_OP_AutoAttack2(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_AutoFire(const EQApplicationPacket *app)
{
if (app->size != sizeof(bool)) {
LogDebug("Size mismatch in OP_AutoFire expected [{}] got [{}]", sizeof(bool), app->size);
DumpPacket(app);
return;
}
bool *af = (bool*)app->pBuffer;
auto_fire = *af;
auto_attack = false;
SetAttackTimer();
}
void Client::Handle_OP_Bandolier(const EQApplicationPacket *app)
{
// Although there are three different structs for OP_Bandolier, they are all the same size.
//
if (app->size != sizeof(BandolierCreate_Struct)) {
LogDebug("Size mismatch in OP_Bandolier expected [{}] got [{}]", sizeof(BandolierCreate_Struct), app->size);
DumpPacket(app);
return;
}
BandolierCreate_Struct *bs = (BandolierCreate_Struct*)app->pBuffer;
switch (bs->Action)
{
case bandolierCreate:
CreateBandolier(app);
break;
case bandolierRemove:
RemoveBandolier(app);
break;
case bandolierSet:
SetBandolier(app);
break;
default:
LogDebug("Unknown Bandolier action [{}]", bs->Action);
break;
}
}
void Client::Handle_OP_BankerChange(const EQApplicationPacket *app)
{
if (app->size != sizeof(BankerChange_Struct) && app->size != 4) //Titanium only sends 4 Bytes for this
{
LogDebug("Size mismatch in OP_BankerChange expected [{}] got [{}]", sizeof(BankerChange_Struct), app->size);
DumpPacket(app);
return;
}
uint32 distance = 0;
NPC *banker = entity_list.GetClosestBanker(this, distance);
if (!banker || distance > USE_NPC_RANGE2)
{
auto hacked_string = fmt::format(
"Player tried to make use of a banker(money) but {} is non-existant or too far away ({} units).",
banker ? banker->GetName() : "UNKNOWN NPC", distance);
database.SetMQDetectionFlag(AccountName(), GetName(), hacked_string, zone->GetShortName());
return;
}
auto outapp = new EQApplicationPacket(OP_BankerChange, nullptr, sizeof(BankerChange_Struct));
BankerChange_Struct *bc = (BankerChange_Struct *)outapp->pBuffer;
if (m_pp.platinum < 0)
m_pp.platinum = 0;
if (m_pp.gold < 0)
m_pp.gold = 0;
if (m_pp.silver < 0)
m_pp.silver = 0;
if (m_pp.copper < 0)
m_pp.copper = 0;
if (m_pp.platinum_bank < 0)
m_pp.platinum_bank = 0;
if (m_pp.gold_bank < 0)
m_pp.gold_bank = 0;
if (m_pp.silver_bank < 0)
m_pp.silver_bank = 0;
if (m_pp.copper_bank < 0)
m_pp.copper_bank = 0;
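// Consolidate coin into the largest denominations (1 platinum = 10 gold = 100 silver = 1000 copper).
// e.g. 2 gold + 35 silver + 12 copper = 562 copper total -> 5 gold, 6 silver, 2 copper.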
uint64 cp = static_cast<uint64>(m_pp.copper) +
(static_cast<uint64>(m_pp.silver) * 10) +
(static_cast<uint64>(m_pp.gold) * 100) +
(static_cast<uint64>(m_pp.platinum) * 1000);
m_pp.copper = cp % 10;
cp /= 10;
m_pp.silver = cp % 10;
cp /= 10;
m_pp.gold = cp % 10;
cp /= 10;
m_pp.platinum = cp;
cp = static_cast<uint64>(m_pp.copper_bank) +
(static_cast<uint64>(m_pp.silver_bank) * 10) +
(static_cast<uint64>(m_pp.gold_bank) * 100) +
(static_cast<uint64>(m_pp.platinum_bank) * 1000);
m_pp.copper_bank = cp % 10;
cp /= 10;
m_pp.silver_bank = cp % 10;
cp /= 10;
m_pp.gold_bank = cp % 10;
cp /= 10;
m_pp.platinum_bank = cp;
bc->copper = m_pp.copper;
bc->silver = m_pp.silver;
bc->gold = m_pp.gold;
bc->platinum = m_pp.platinum;
bc->copper_bank = m_pp.copper_bank;
bc->silver_bank = m_pp.silver_bank;
bc->gold_bank = m_pp.gold_bank;
bc->platinum_bank = m_pp.platinum_bank;
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_Barter(const EQApplicationPacket *app)
{
if (app->size < 4)
{
LogDebug("OP_Barter packet below minimum expected size. The packet was [{}] bytes", app->size);
DumpPacket(app);
return;
}
char* Buf = (char *)app->pBuffer;
// The first 4 bytes of the packet determine the action. A lot of Barter packets require the
// packet the client sent, sent back to it as an acknowledgement.
//
uint32 Action = VARSTRUCT_DECODE_TYPE(uint32, Buf);
switch (Action)
{
case Barter_BuyerSearch:
{
BuyerItemSearch(app);
break;
}
case Barter_SellerSearch:
{
BarterSearchRequest_Struct *bsr = (BarterSearchRequest_Struct*)app->pBuffer;
SendBuyerResults(bsr->SearchString, bsr->SearchID);
break;
}
case Barter_BuyerModeOn:
{
if (!Trader) {
ToggleBuyerMode(true);
}
else {
Buf = (char *)app->pBuffer;
VARSTRUCT_ENCODE_TYPE(uint32, Buf, Barter_BuyerModeOff);
Message(Chat::Red, "You cannot be a Trader and Buyer at the same time.");
}
QueuePacket(app);
break;
}
case Barter_BuyerModeOff:
{
QueuePacket(app);
ToggleBuyerMode(false);
break;
}
case Barter_BuyerItemUpdate:
{
UpdateBuyLine(app);
break;
}
case Barter_BuyerItemRemove:
{
BuyerRemoveItem_Struct* bris = (BuyerRemoveItem_Struct*)app->pBuffer;
database.RemoveBuyLine(CharacterID(), bris->BuySlot);
QueuePacket(app);
break;
}
case Barter_SellItem:
{
SellToBuyer(app);
break;
}
case Barter_BuyerInspectBegin:
{
ShowBuyLines(app);
break;
}
case Barter_BuyerInspectEnd:
{
BuyerInspectRequest_Struct* bir = (BuyerInspectRequest_Struct*)app->pBuffer;
Client *Buyer = entity_list.GetClientByID(bir->BuyerID);
if (Buyer)
Buyer->WithCustomer(0);
break;
}
case Barter_BarterItemInspect:
{
BarterItemSearchLinkRequest_Struct* bislr = (BarterItemSearchLinkRequest_Struct*)app->pBuffer;
const EQ::ItemData* item = database.GetItem(bislr->ItemID);
if (!item)
Message(Chat::Red, "Error: This item does not exist!");
else
{
EQ::ItemInstance* inst = database.CreateItem(item);
if (inst)
{
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
}
break;
}
case Barter_Welcome:
{
SendBazaarWelcome();
break;
}
case Barter_WelcomeMessageUpdate:
{
BuyerWelcomeMessageUpdate_Struct* bwmu = (BuyerWelcomeMessageUpdate_Struct*)app->pBuffer;
SetBuyerWelcomeMessage(bwmu->WelcomeMessage);
break;
}
case Barter_BuyerItemInspect:
{
BuyerItemSearchLinkRequest_Struct* bislr = (BuyerItemSearchLinkRequest_Struct*)app->pBuffer;
const EQ::ItemData* item = database.GetItem(bislr->ItemID);
if (!item)
Message(Chat::Red, "Error: This item does not exist!");
else
{
EQ::ItemInstance* inst = database.CreateItem(item);
if (inst)
{
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
}
break;
}
case Barter_Unknown23:
{
// Sent by SoD client for no discernible reason.
break;
}
default:
Message(Chat::Red, "Unrecognised Barter action.");
LogTrading("Unrecognised Barter Action [{}]", Action);
}
}
void Client::Handle_OP_BazaarInspect(const EQApplicationPacket *app)
{
if (app->size != sizeof(BazaarInspect_Struct)) {
LogError("Invalid size for BazaarInspect_Struct: Expected [{}], Got [{}]",
sizeof(BazaarInspect_Struct), app->size);
return;
}
BazaarInspect_Struct* bis = (BazaarInspect_Struct*)app->pBuffer;
const EQ::ItemData* item = database.GetItem(bis->ItemID);
if (!item) {
Message(Chat::Red, "Error: This item does not exist!");
return;
}
EQ::ItemInstance* inst = database.CreateItem(item);
if (inst) {
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
return;
}
void Client::Handle_OP_BazaarSearch(const EQApplicationPacket *app)
{
if (app->size == sizeof(BazaarSearch_Struct)) {
BazaarSearch_Struct* bss = (BazaarSearch_Struct*)app->pBuffer;
this->SendBazaarResults(bss->TraderID, bss->Class_, bss->Race, bss->ItemStat, bss->Slot, bss->Type,
bss->Name, bss->MinPrice * 1000, bss->MaxPrice * 1000);
}
else if (app->size == sizeof(BazaarWelcome_Struct)) {
BazaarWelcome_Struct* bws = (BazaarWelcome_Struct*)app->pBuffer;
if (bws->Beginning.Action == BazaarWelcome)
SendBazaarWelcome();
}
else if (app->size == sizeof(NewBazaarInspect_Struct)) {
NewBazaarInspect_Struct *nbis = (NewBazaarInspect_Struct*)app->pBuffer;
Client *c = entity_list.GetClientByName(nbis->Name);
if (c) {
EQ::ItemInstance* inst = c->FindTraderItemBySerialNumber(nbis->SerialNumber);
if (inst)
SendItemPacket(0, inst, ItemPacketViewLink);
}
return;
}
else {
LogTrading("Malformed BazaarSearch_Struct packe, Action [{}]t received, ignoring");
LogError("Malformed BazaarSearch_Struct packet received, ignoring\n");
}
return;
}
void Client::Handle_OP_Begging(const EQApplicationPacket *app)
{
if (!p_timers.Expired(&database, pTimerBeggingPickPocket, false))
{
Message(Chat::Red, "Ability recovery time not yet met.");
auto outapp = new EQApplicationPacket(OP_Begging, sizeof(BeggingResponse_Struct));
BeggingResponse_Struct *brs = (BeggingResponse_Struct*)outapp->pBuffer;
brs->Result = 0;
FastQueuePacket(&outapp);
return;
}
if (!HasSkill(EQ::skills::SkillBegging) || !GetTarget())
return;
if (GetTarget()->GetClass() == LDON_TREASURE)
return;
p_timers.Start(pTimerBeggingPickPocket, 8);
auto outapp = new EQApplicationPacket(OP_Begging, sizeof(BeggingResponse_Struct));
BeggingResponse_Struct *brs = (BeggingResponse_Struct*)outapp->pBuffer;
brs->Result = 0; // Default, Fail.
if (GetTarget() == this)
{
FastQueuePacket(&outapp);
return;
}
int RandomChance = zone->random.Int(0, 100);
int ChanceToAttack = 0;
if (GetLevel() > GetTarget()->GetLevel())
ChanceToAttack = zone->random.Int(0, 15);
else
ChanceToAttack = zone->random.Int(((this->GetTarget()->GetLevel() - this->GetLevel()) * 10) - 5, ((this->GetTarget()->GetLevel() - this->GetLevel()) * 10));
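// e.g. begging a target two levels higher rolls an attack chance between 15 and 20 (vs. RandomChance of 0-100)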
if (ChanceToAttack < 0)
ChanceToAttack = -ChanceToAttack;
if (RandomChance < ChanceToAttack)
{
GetTarget()->Attack(this);
QueuePacket(outapp);
safe_delete(outapp);
return;
}
uint16 CurrentSkill = GetSkill(EQ::skills::SkillBegging);
float ChanceToBeg = ((float)(CurrentSkill / 700.0f) + 0.15f) * 100;
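// e.g. at skill 100: (100 / 700.0 + 0.15) * 100 ~= 29.3% chance to successfully beg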
if (RandomChance < ChanceToBeg)
{
brs->Amount = zone->random.Int(1, 10);
// This needs some work to determine how much money they can beg, based on skill level etc.
if (CurrentSkill < 50)
{
brs->Result = 4; // Copper
AddMoneyToPP(brs->Amount, false);
}
else
{
brs->Result = 3; // Silver
AddMoneyToPP(brs->Amount * 10, false);
}
}
QueuePacket(outapp);
safe_delete(outapp);
CheckIncreaseSkill(EQ::skills::SkillBegging, nullptr, -10);
}
void Client::Handle_OP_Bind_Wound(const EQApplicationPacket *app)
{
if (app->size != sizeof(BindWound_Struct)) {
LogError("Size mismatch for Bind wound packet");
DumpPacket(app);
return;
}
BindWound_Struct* bind_in = (BindWound_Struct*)app->pBuffer;
Mob* bindmob = entity_list.GetMob(bind_in->to);
if (!bindmob) {
LogError("Bindwound on non-exsistant mob from [{}]", this->GetName());
}
else {
LogDebug("BindWound in: to:\'[{}]\' from=\'[{}]\'", bindmob->GetName(), GetName());
BindWound(bindmob, true);
}
return;
}
void Client::Handle_OP_BlockedBuffs(const EQApplicationPacket *app)
{
if (!RuleB(Spells, EnableBlockedBuffs))
return;
if (app->size != sizeof(BlockedBuffs_Struct))
{
LogDebug("Size mismatch in OP_BlockedBuffs expected [{}] got [{}]", sizeof(BlockedBuffs_Struct), app->size);
DumpPacket(app);
return;
}
std::set<uint32>::iterator Iterator;
BlockedBuffs_Struct *bbs = (BlockedBuffs_Struct*)app->pBuffer;
std::set<uint32> *BlockedBuffs = bbs->Pet ? &PetBlockedBuffs : &PlayerBlockedBuffs;
if (bbs->Initialise == 1)
{
BlockedBuffs->clear();
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
{
if ((IsValidSpell(bbs->SpellID[i])) && IsBeneficialSpell(bbs->SpellID[i]) && !spells[bbs->SpellID[i]].no_block)
{
if (BlockedBuffs->find(bbs->SpellID[i]) == BlockedBuffs->end())
BlockedBuffs->insert(bbs->SpellID[i]);
}
}
auto outapp = new EQApplicationPacket(OP_BlockedBuffs, sizeof(BlockedBuffs_Struct));
BlockedBuffs_Struct *obbs = (BlockedBuffs_Struct*)outapp->pBuffer;
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
obbs->SpellID[i] = -1;
obbs->Pet = bbs->Pet;
obbs->Initialise = 1;
obbs->Flags = 0x54;
obbs->Count = BlockedBuffs->size();
unsigned int Element = 0;
Iterator = BlockedBuffs->begin();
while (Iterator != BlockedBuffs->end())
{
obbs->SpellID[Element++] = (*Iterator);
++Iterator;
}
FastQueuePacket(&outapp);
return;
}
if ((bbs->Initialise == 0) && (bbs->Count > 0))
{
auto outapp = new EQApplicationPacket(OP_BlockedBuffs, sizeof(BlockedBuffs_Struct));
BlockedBuffs_Struct *obbs = (BlockedBuffs_Struct*)outapp->pBuffer;
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
obbs->SpellID[i] = -1;
obbs->Pet = bbs->Pet;
obbs->Initialise = 0;
obbs->Flags = 0x54;
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
{
if (!IsValidSpell(bbs->SpellID[i]) || !IsBeneficialSpell(bbs->SpellID[i]) || spells[bbs->SpellID[i]].no_block)
continue;
if ((BlockedBuffs->size() < BLOCKED_BUFF_COUNT) && (BlockedBuffs->find(bbs->SpellID[i]) == BlockedBuffs->end()))
BlockedBuffs->insert(bbs->SpellID[i]);
}
obbs->Count = BlockedBuffs->size();
Iterator = BlockedBuffs->begin();
unsigned int Element = 0;
while (Iterator != BlockedBuffs->end())
{
obbs->SpellID[Element++] = (*Iterator);
++Iterator;
}
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_BoardBoat(const EQApplicationPacket *app)
{
// this sends unclean mob name, so capped at 64
// a_boat006
if (app->size <= 5 || app->size > 64) {
LogError("Size mismatch in OP_BoardBoad. Expected greater than 5 less than 64, got [{}]", app->size);
DumpPacket(app);
return;
}
char boatname[64];
memcpy(boatname, app->pBuffer, app->size);
boatname[63] = '\0';
Mob* boat = entity_list.GetMob(boatname);
if (!boat || !boat->IsControllableBoat()) {
return;
}
controlling_boat_id = boat->GetID(); // set the client's BoatID to show that it's on this boat
Message(0, "Board boat: %s", boatname);
return;
}
void Client::Handle_OP_Buff(const EQApplicationPacket *app)
{
if (app->size != sizeof(SpellBuffPacket_Struct))
{
LogError("Size mismatch in OP_Buff. expected [{}] got [{}]", sizeof(SpellBuffPacket_Struct), app->size);
DumpPacket(app);
return;
}
SpellBuffPacket_Struct* sbf = (SpellBuffPacket_Struct*)app->pBuffer;
uint32 spid = sbf->buff.spellid;
LogSpells("Client requested that buff with spell id [{}] be canceled", spid);
//something about IsDetrimentalSpell() crashes this portion of code..
//tbh we shouldn't use it anyway since this is a simple red vs blue buff check and
//isdetrimentalspell() is much more complex
if (spid == 0xFFFF || (IsValidSpell(spid) && (spells[spid].goodEffect == 0)))
QueuePacket(app);
else
BuffFadeBySpellID(spid);
return;
}
void Client::Handle_OP_BuffRemoveRequest(const EQApplicationPacket *app)
{
// In SoD, this is used for clicking off Pet Buffs only. In Underfoot, it is used both for Client and Pets
// The payload contains buffslot and EntityID only, so we must check if the EntityID is ours or our pets.
//
VERIFY_PACKET_LENGTH(OP_BuffRemoveRequest, app, BuffRemoveRequest_Struct);
BuffRemoveRequest_Struct *brrs = (BuffRemoveRequest_Struct*)app->pBuffer;
Mob *m = nullptr;
if (brrs->EntityID == GetID()) {
m = this;
}
else if (brrs->EntityID == GetPetID()) {
m = GetPet();
}
#ifdef BOTS
else {
Mob* bot_test = entity_list.GetMob(brrs->EntityID);
if (bot_test && bot_test->IsBot() && bot_test->GetOwner() == this)
m = bot_test;
}
#endif
if (!m)
return;
if (brrs->SlotID > (uint32)m->GetMaxTotalSlots())
return;
uint16 SpellID = m->GetSpellIDFromSlot(brrs->SlotID);
if (SpellID && IsBeneficialSpell(SpellID) && !spells[SpellID].no_remove)
m->BuffFadeBySlot(brrs->SlotID, true);
}
void Client::Handle_OP_Bug(const EQApplicationPacket *app)
{
if (!RuleB(Bugs, ReportingSystemActive)) {
Message(0, "Bug reporting is disabled on this server.");
return;
}
if (app->size != sizeof(BugReport_Struct)) {
printf("Wrong size of BugReport_Struct got %d expected %zu!\n", app->size, sizeof(BugReport_Struct));
}
else {
BugReport_Struct* bug_report = (BugReport_Struct*)app->pBuffer;
if (RuleB(Bugs, UseOldReportingMethod))
database.RegisterBug(bug_report);
else
database.RegisterBug(this, bug_report);
}
return;
}
void Client::Handle_OP_Camp(const EQApplicationPacket *app)
{
#ifdef BOTS
// This block is necessary to clean up any bot objects owned by a Client
Bot::BotOrderCampAll(this);
// Evidently, this is bad under certain conditions and causes crashes...
// Group and Raid code really needs to be overhauled to account for non-client types (mercs and bots)
//auto group = GetGroup();
//if (group && group->GroupCount() < 2)
// group->DisbandGroup();
#endif
if (IsLFP())
worldserver.StopLFP(CharacterID());
if (GetGM())
{
OnDisconnect(true);
return;
}
camp_timer.Start(29000, true);
return;
}
void Client::Handle_OP_CancelTask(const EQApplicationPacket *app)
{
if (app->size != sizeof(CancelTask_Struct)) {
LogDebug("Size mismatch in OP_CancelTask expected [{}] got [{}]", sizeof(CancelTask_Struct), app->size);
DumpPacket(app);
return;
}
CancelTask_Struct *cts = (CancelTask_Struct*)app->pBuffer;
if (RuleB(TaskSystem, EnableTaskSystem) && task_state)
task_state->CancelTask(this, cts->SequenceNumber, static_cast<TaskType>(cts->type));
}
void Client::Handle_OP_CancelTrade(const EQApplicationPacket *app)
{
if (app->size != sizeof(CancelTrade_Struct)) {
LogError("Wrong size: OP_CancelTrade, size=[{}], expected [{}]", app->size, sizeof(CancelTrade_Struct));
return;
}
Mob* with = trade->With();
if (with && with->IsClient()) {
CancelTrade_Struct* msg = (CancelTrade_Struct*)app->pBuffer;
// Forward cancel packet to other client
msg->fromid = with->GetID();
//msg->action = 1;
with->CastToClient()->QueuePacket(app);
// Put trade items/cash back into inventory
FinishTrade(this);
trade->Reset();
}
else if (with) {
CancelTrade_Struct* msg = (CancelTrade_Struct*)app->pBuffer;
msg->fromid = with->GetID();
QueuePacket(app);
FinishTrade(this);
trade->Reset();
}
EQApplicationPacket end_trade1(OP_FinishWindow, 0);
QueuePacket(&end_trade1);
EQApplicationPacket end_trade2(OP_FinishWindow2, 0);
QueuePacket(&end_trade2);
return;
}
void Client::Handle_OP_CastSpell(const EQApplicationPacket *app)
{
using EQ::spells::CastingSlot;
if (app->size != sizeof(CastSpell_Struct)) {
std::cout << "Wrong size: OP_CastSpell, size=" << app->size << ", expected " << sizeof(CastSpell_Struct) << std::endl;
return;
}
if (IsAIControlled()) {
this->MessageString(Chat::Red, NOT_IN_CONTROL);
//Message(Chat::Red, "You cant cast right now, you arent in control of yourself!");
return;
}
// Hack for broken RoF2 which allows casting after a zoned IVU/IVA
if (invisible_undead || invisible_animals) {
BuffFadeByEffect(SE_InvisVsAnimals);
BuffFadeByEffect(SE_InvisVsUndead);
BuffFadeByEffect(SE_InvisVsUndead2);
BuffFadeByEffect(SE_Invisibility); // Included per JJ for completeness - client handles this one atm
}
CastSpell_Struct* castspell = (CastSpell_Struct*)app->pBuffer;
m_TargetRing = glm::vec3(castspell->x_pos, castspell->y_pos, castspell->z_pos);
LogSpells("OP CastSpell: slot [{}] spell [{}] target [{}] inv [{}]", castspell->slot, castspell->spell_id, castspell->target_id, (unsigned long)castspell->inventoryslot);
CastingSlot slot = static_cast<CastingSlot>(castspell->slot);
/* Memorized Spell */
if (m_pp.mem_spells[castspell->slot] && m_pp.mem_spells[castspell->slot] == castspell->spell_id) {
uint16 spell_to_cast = 0;
if (castspell->slot < EQ::spells::SPELL_GEM_COUNT) {
spell_to_cast = m_pp.mem_spells[castspell->slot];
if (spell_to_cast != castspell->spell_id) {
InterruptSpell(castspell->spell_id); //CHEATER!!!
return;
}
}
else if (castspell->slot >= EQ::spells::SPELL_GEM_COUNT) {
InterruptSpell();
return;
}
CastSpell(spell_to_cast, castspell->target_id, slot);
}
/* Spell Slot or Potion Belt Slot */
else if (slot == CastingSlot::Item || slot == CastingSlot::PotionBelt) // ITEM or POTION cast
{
if (m_inv.SupportsClickCasting(castspell->inventoryslot) || slot == CastingSlot::PotionBelt) // sanity check
{
// packet field types will be reviewed as packet transitions occur
const EQ::ItemInstance* inst = m_inv[castspell->inventoryslot]; //slot values are int16, need to check packet on this field
//bool cancast = true;
if (inst && inst->IsClassCommon())
{
const EQ::ItemData* item = inst->GetItem();
if (item->Click.Effect != (uint32)castspell->spell_id)
{
database.SetMQDetectionFlag(account_name, name, "OP_CastSpell with item, tried to cast a different spell.", zone->GetShortName());
InterruptSpell(castspell->spell_id); //CHEATER!!
return;
}
if ((item->Click.Type == EQ::item::ItemEffectClick) || (item->Click.Type == EQ::item::ItemEffectExpendable) || (item->Click.Type == EQ::item::ItemEffectEquipClick) || (item->Click.Type == EQ::item::ItemEffectClick2))
{
if (item->Click.Level2 > 0)
{
if (GetLevel() >= item->Click.Level2)
{
EQ::ItemInstance* p_inst = (EQ::ItemInstance*)inst;
int i = parse->EventItem(EVENT_ITEM_CLICK_CAST, this, p_inst, nullptr, "", castspell->inventoryslot);
if (i == 0) {
CastSpell(item->Click.Effect, castspell->target_id, slot, item->CastTime, 0, 0, castspell->inventoryslot);
}
else {
InterruptSpell(castspell->spell_id);
return;
}
}
else
{
database.SetMQDetectionFlag(account_name, name, "OP_CastSpell with item, did not meet req level.", zone->GetShortName());
Message(0, "Error: level not high enough.", castspell->inventoryslot);
InterruptSpell(castspell->spell_id);
}
}
else
{
EQ::ItemInstance* p_inst = (EQ::ItemInstance*)inst;
int i = parse->EventItem(EVENT_ITEM_CLICK_CAST, this, p_inst, nullptr, "", castspell->inventoryslot);
if (i == 0) {
CastSpell(item->Click.Effect, castspell->target_id, slot, item->CastTime, 0, 0, castspell->inventoryslot);
}
else {
InterruptSpell(castspell->spell_id);
return;
}
}
}
else
{
Message(0, "Error: unknown item->Click.Type (0x%02x)", item->Click.Type);
}
}
else
{
Message(0, "Error: item not found in inventory slot #%i", castspell->inventoryslot);
InterruptSpell(castspell->spell_id);
}
}
else
{
Message(0, "Error: castspell->inventoryslot >= %i (0x%04x)", EQ::invslot::slotCursor, castspell->inventoryslot);
InterruptSpell(castspell->spell_id);
}
}
/* Discipline -- older clients use the same slot as items, but we translate it to its own */
else if (slot == CastingSlot::Discipline) {
if (!UseDiscipline(castspell->spell_id, castspell->target_id)) {
LogSpells("Unknown ability being used by [{}], spell being cast is: [{}]\n", GetName(), castspell->spell_id);
InterruptSpell(castspell->spell_id);
return;
}
}
/* ABILITY cast (LoH and Harm Touch) */
else if (slot == CastingSlot::Ability) {
uint16 spell_to_cast = 0;
if (castspell->spell_id == SPELL_LAY_ON_HANDS && GetClass() == PALADIN) {
if (!p_timers.Expired(&database, pTimerLayHands)) {
Message(Chat::Red, "Ability recovery time not yet met.");
InterruptSpell(castspell->spell_id);
return;
}
spell_to_cast = SPELL_LAY_ON_HANDS;
p_timers.Start(pTimerLayHands, LayOnHandsReuseTime);
}
else if ((castspell->spell_id == SPELL_HARM_TOUCH
|| castspell->spell_id == SPELL_HARM_TOUCH2) && GetClass() == SHADOWKNIGHT) {
if (!p_timers.Expired(&database, pTimerHarmTouch)) {
Message(Chat::Red, "Ability recovery time not yet met.");
InterruptSpell(castspell->spell_id);
return;
}
// determine which version of HT we are casting based on level
if (GetLevel() < 40)
spell_to_cast = SPELL_HARM_TOUCH;
else
spell_to_cast = SPELL_HARM_TOUCH2;
p_timers.Start(pTimerHarmTouch, HarmTouchReuseTime);
}
if (spell_to_cast > 0) // if we've matched LoH or HT, cast now
CastSpell(spell_to_cast, castspell->target_id, slot);
}
return;
}
void Client::Handle_OP_ChannelMessage(const EQApplicationPacket *app)
{
ChannelMessage_Struct* cm = (ChannelMessage_Struct*)app->pBuffer;
if (app->size < sizeof(ChannelMessage_Struct)) {
std::cout << "Wrong size " << app->size << ", should be " << sizeof(ChannelMessage_Struct) << "+ on 0x" << std::hex << std::setfill('0') << std::setw(4) << app->GetOpcode() << std::dec << std::endl;
return;
}
if (IsAIControlled() && !GetGM()) {
Message(Chat::Red, "You try to speak but cant move your mouth!");
return;
}
uint8 skill_in_language = 100;
if (cm->language < MAX_PP_LANGUAGE)
{
skill_in_language = m_pp.languages[cm->language];
}
ChannelMessageReceived(cm->chan_num, cm->language, skill_in_language, cm->message, cm->targetname);
return;
}
void Client::Handle_OP_ClearBlockedBuffs(const EQApplicationPacket *app)
{
if (!RuleB(Spells, EnableBlockedBuffs))
return;
if (app->size != 1)
{
LogDebug("Size mismatch in OP_ClearBlockedBuffs expected 1 got [{}]", app->size);
DumpPacket(app);
return;
}
bool Pet = app->pBuffer[0];
if (Pet)
PetBlockedBuffs.clear();
else
PlayerBlockedBuffs.clear();
QueuePacket(app);
}
void Client::Handle_OP_ClearNPCMarks(const EQApplicationPacket *app)
{
if (app->size != 0)
{
LogDebug("Size mismatch in OP_ClearNPCMarks expected 0 got [{}]", app->size);
DumpPacket(app);
return;
}
Group *g = GetGroup();
if (g)
g->ClearAllNPCMarks();
}
void Client::Handle_OP_ClearSurname(const EQApplicationPacket *app)
{
ChangeLastName("");
}
void Client::Handle_OP_ClickDoor(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClickDoor_Struct)) {
LogError("Wrong size: OP_ClickDoor, size=[{}], expected [{}]", app->size, sizeof(ClickDoor_Struct));
return;
}
ClickDoor_Struct* cd = (ClickDoor_Struct*)app->pBuffer;
Doors* currentdoor = entity_list.FindDoor(cd->doorid);
if (!currentdoor)
{
Message(0, "Unable to find door, please notify a GM (DoorID: %i).", cd->doorid);
return;
}
// set door selected
if (IsDevToolsEnabled()) {
SetDoorToolEntityId(currentdoor->GetEntityID());
DoorManipulation::CommandHeader(this);
Message(
Chat::White,
fmt::format(
"Door ({}) [{}]",
currentdoor->GetEntityID(),
EQ::SayLinkEngine::GenerateQuestSaylink("#door edit", false, "#door edit")
).c_str()
);
}
char buf[20];
snprintf(buf, 19, "%u", cd->doorid);
buf[19] = '\0';
std::vector<EQ::Any> args;
args.push_back(currentdoor);
parse->EventPlayer(EVENT_CLICK_DOOR, this, buf, 0, &args);
currentdoor->HandleClick(this, 0);
return;
}
void Client::Handle_OP_ClickObject(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClickObject_Struct)) {
LogError("Invalid size on ClickObject_Struct: Expected [{}], Got [{}]",
sizeof(ClickObject_Struct), app->size);
return;
}
ClickObject_Struct* click_object = (ClickObject_Struct*)app->pBuffer;
Entity* entity = entity_list.GetID(click_object->drop_id);
if (entity && entity->IsObject()) {
Object* object = entity->CastToObject();
object->HandleClick(this, click_object);
std::vector<EQ::Any> args;
args.push_back(object);
char buf[10];
snprintf(buf, 9, "%u", click_object->drop_id);
buf[9] = '\0';
parse->EventPlayer(EVENT_CLICK_OBJECT, this, buf, GetID(), &args);
}
// Observed in RoF after OP_ClickObjectAction:
//EQApplicationPacket end_trade2(OP_FinishWindow2, 0);
//QueuePacket(&end_trade2);
return;
}
void Client::Handle_OP_ClickObjectAction(const EQApplicationPacket *app)
{
if (app->size == 0) {
// RoF sends this packet 0 sized when switching from auto-combine to experiment windows.
// Not completely sure if 0 sized is for this or for closing objects as commented out below
EQApplicationPacket end_trade1(OP_FinishWindow, 0);
QueuePacket(&end_trade1);
EQApplicationPacket end_trade2(OP_FinishWindow2, 0);
QueuePacket(&end_trade2);
// RoF sends a 0 sized packet for closing objects
if (GetTradeskillObject() && ClientVersion() >= EQ::versions::ClientVersion::RoF)
GetTradeskillObject()->CastToObject()->Close();
return;
}
else
{
if (app->size != sizeof(ClickObjectAction_Struct)) {
LogError("Invalid size on OP_ClickObjectAction: Expected [{}], Got [{}]",
sizeof(ClickObjectAction_Struct), app->size);
return;
}
ClickObjectAction_Struct* oos = (ClickObjectAction_Struct*)app->pBuffer;
Entity* entity = entity_list.GetEntityObject(oos->drop_id);
if (entity && entity->IsObject()) {
Object* object = entity->CastToObject();
if (oos->open == 0) {
object->Close();
}
else {
LogError("Unsupported action [{}] in OP_ClickObjectAction", oos->open);
}
}
else {
LogError("Invalid object [{}] in OP_ClickObjectAction", oos->drop_id);
}
}
SetTradeskillObject(nullptr);
EQApplicationPacket end_trade1(OP_FinishWindow, 0);
QueuePacket(&end_trade1);
EQApplicationPacket end_trade2(OP_FinishWindow2, 0);
QueuePacket(&end_trade2);
return;
}
void Client::Handle_OP_ClientError(const EQApplicationPacket *app)
{
ClientError_Struct* error = (ClientError_Struct*)app->pBuffer;
LogError("Client error: [{}]", error->character_name);
LogError("Error message:[{}]", error->message);
return;
}
void Client::Handle_OP_ClientTimeStamp(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_ClientUpdate(const EQApplicationPacket *app) {
if (IsAIControlled())
return;
if (dead)
return;
/* Invalid size check */
if (app->size != sizeof(PlayerPositionUpdateClient_Struct)
&& app->size != (sizeof(PlayerPositionUpdateClient_Struct) + 1)
) {
LogError("OP size error: OP_ClientUpdate expected:[{}] got:[{}]",
sizeof(PlayerPositionUpdateClient_Struct), app->size);
return;
}
PlayerPositionUpdateClient_Struct *ppu = (PlayerPositionUpdateClient_Struct *) app->pBuffer;
/* Non PC handling like boats and eye of zomm */
if (ppu->spawn_id && ppu->spawn_id != GetID()) {
Mob *cmob = entity_list.GetMob(ppu->spawn_id);
if (!cmob) {
return;
}
if (cmob->IsControllableBoat()) {
// Controllable boats
auto boat_delta = glm::vec4(ppu->delta_x, ppu->delta_y, ppu->delta_z, EQ10toFloat(ppu->delta_heading));
cmob->SetDelta(boat_delta);
auto outapp = new EQApplicationPacket(OP_ClientUpdate, sizeof(PlayerPositionUpdateServer_Struct));
PlayerPositionUpdateServer_Struct *ppus = (PlayerPositionUpdateServer_Struct *) outapp->pBuffer;
cmob->MakeSpawnUpdate(ppus);
entity_list.QueueCloseClients(cmob, outapp, true, 300, this, false);
safe_delete(outapp);
/* Update the boat's position on the server, without sending an update */
cmob->GMMove(ppu->x_pos, ppu->y_pos, ppu->z_pos, EQ12toFloat(ppu->heading), false);
return;
}
else {
// Eye of Zomm needs code here to track position of the eye on server
// so that other clients see it. I could add a check here for eye of zomm
// race, to limit this code, but this should handle any client controlled
// mob that gets updates from OP_ClientUpdate
if (!cmob->IsControllableBoat() && ppu->spawn_id == controlled_mob_id) {
cmob->SetPosition(ppu->x_pos, ppu->y_pos, ppu->z_pos);
cmob->SetHeading(EQ12toFloat(ppu->heading));
mMovementManager->SendCommandToClients(cmob, 0.0, 0.0, 0.0,
0.0, 0, ClientRangeAny, nullptr, this);
cmob->CastToNPC()->SaveGuardSpot(glm::vec4(ppu->x_pos,
ppu->y_pos, ppu->z_pos, EQ12toFloat(ppu->heading)));
}
}
return;
}
// At this point, all that's left is a client update.
	// Pure boat updates and client controlled mob updates are complete.
// This can still be tricky. If ppu->vehicle_id is set, then the client
// position is actually an offset from the boat he is inside.
bool on_boat = (ppu->vehicle_id != 0);
// From this point forward, we need to use a new set of variables for client
// position. If the client is in a boat, we need to add the boat pos and
// the client offset together.
float cx = ppu->x_pos;
float cy = ppu->y_pos;
float cz = ppu->z_pos;
float new_heading = EQ12toFloat(ppu->heading);
if (on_boat) {
Mob *boat = entity_list.GetMob(ppu->vehicle_id);
if (boat == 0) {
LogError("Can't find boat for client position offset.");
}
else {
if (boat->turning) return;
// Calculate angle from boat heading to EQ heading
double theta = std::fmod(((boat->GetHeading() * 360.0) / 512.0),360.0);
double thetar = (theta * M_PI) / 180.0;
// Boat cx is inverted (positive to left)
// Boat cy is normal (positive toward heading)
double cosine = std::cos(thetar);
double sine = std::sin(thetar);
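			// Standard 2D rotation of the boat-relative offset into world space; the
			// x term is negated on both sides because the boat's local x axis is
			// inverted (positive to the player's left), per the note above.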
double normalizedx, normalizedy;
normalizedx = cx * cosine - -cy * sine;
normalizedy = -cx * sine + cy * cosine;
cx = boat->GetX() + normalizedx;
cy = boat->GetY() + normalizedy;
cz += boat->GetZ();
new_heading += boat->GetHeading();
}
}
cheat_manager.MovementCheck(glm::vec3(cx, cy, cz));
if (IsDraggingCorpse())
DragCorpses();
/* Check to see if PPU should trigger an update to the rewind position. */
float rewind_x_diff = 0;
float rewind_y_diff = 0;
rewind_x_diff = cx - m_RewindLocation.x;
rewind_x_diff *= rewind_x_diff;
rewind_y_diff = cy - m_RewindLocation.y;
rewind_y_diff *= rewind_y_diff;
	/*
		We only need to store updated values if the player has moved.
		If the squared distance moved on x or y exceeds 750 (roughly 27 units),
		we store the player's pre-PPU x and y for /rewind, in case he gets stuck.
	*/
if ((rewind_x_diff > 750) || (rewind_y_diff > 750))
m_RewindLocation = glm::vec3(m_Position);
/*
If the PPU was a large jump, such as a cross zone gate or Call of Hero,
just update rewind coordinates to the new ppu coordinates. This will prevent exploitation.
*/
if ((rewind_x_diff > 5000) || (rewind_y_diff > 5000))
m_RewindLocation = glm::vec3(cx, cy, cz);
if (proximity_timer.Check()) {
entity_list.ProcessMove(this, glm::vec3(cx, cy, cz));
if (RuleB(TaskSystem, EnableTaskSystem) && RuleB(TaskSystem, EnableTaskProximity))
ProcessTaskProximities(cx, cy, cz);
m_Proximity = glm::vec3(cx, cy, cz);
}
/* Update internal state */
m_Delta = glm::vec4(ppu->delta_x, ppu->delta_y, ppu->delta_z, EQ10toFloat(ppu->delta_heading));
if (IsTracking() && ((m_Position.x != cx) || (m_Position.y != cy))) {
		if (zone->random.Real(0, 100) < 70) // should be good
CheckIncreaseSkill(EQ::skills::SkillTracking, nullptr, -20);
}
/* Break Hide if moving without sneaking and set rewind timer if moved */
if (cy != m_Position.y || cx != m_Position.x) {
if ((hidden || improved_hidden) && !sneaking) {
hidden = false;
improved_hidden = false;
if (!invisible) {
auto outapp =
new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct *sa_out = (SpawnAppearance_Struct *) outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x03;
sa_out->parameter = 0;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
}
}
rewind_timer.Start(30000, true);
}
is_client_moving = !(cy == m_Position.y && cx == m_Position.x);
/**
* Client aggro scanning
*/
const uint16 client_scan_npc_aggro_timer_idle = RuleI(Aggro, ClientAggroCheckIdleInterval);
const uint16 client_scan_npc_aggro_timer_moving = RuleI(Aggro, ClientAggroCheckMovingInterval);
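	// While the client is moving, aggro scans run on the shorter "moving" interval
	// (with an immediate trigger); once it stops, the code below swaps the timer
	// back to the longer "idle" interval.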
LogAggroDetail(
"ClientUpdate [{}] {}moving, scan timer [{}]",
GetCleanName(),
is_client_moving ? "" : "NOT ",
client_scan_npc_aggro_timer.GetRemainingTime()
);
if (is_client_moving) {
if (client_scan_npc_aggro_timer.GetRemainingTime() > client_scan_npc_aggro_timer_moving) {
LogAggroDetail("Client [{}] Restarting with moving timer", GetCleanName());
client_scan_npc_aggro_timer.Disable();
client_scan_npc_aggro_timer.Start(client_scan_npc_aggro_timer_moving);
client_scan_npc_aggro_timer.Trigger();
}
}
else if (client_scan_npc_aggro_timer.GetDuration() == client_scan_npc_aggro_timer_moving) {
LogAggroDetail("Client [{}] Restarting with idle timer", GetCleanName());
client_scan_npc_aggro_timer.Disable();
client_scan_npc_aggro_timer.Start(client_scan_npc_aggro_timer_idle);
}
/**
* Client mob close list cache scan timer
*/
const uint16 client_mob_close_scan_timer_moving = 6000;
const uint16 client_mob_close_scan_timer_idle = 60000;
LogAIScanCloseDetail(
"Client [{}] {}moving, scan timer [{}]",
GetCleanName(),
is_client_moving ? "" : "NOT ",
mob_close_scan_timer.GetRemainingTime()
);
if (is_client_moving) {
if (mob_close_scan_timer.GetRemainingTime() > client_mob_close_scan_timer_moving) {
LogAIScanCloseDetail("Client [{}] Restarting with moving timer", GetCleanName());
mob_close_scan_timer.Disable();
mob_close_scan_timer.Start(client_mob_close_scan_timer_moving);
mob_close_scan_timer.Trigger();
}
}
else if (mob_close_scan_timer.GetDuration() == client_mob_close_scan_timer_moving) {
LogAIScanCloseDetail("Client [{}] Restarting with idle timer", GetCleanName());
mob_close_scan_timer.Disable();
mob_close_scan_timer.Start(client_mob_close_scan_timer_idle);
}
	/**
	 * Normally we limit mob movement updates based on distance.
	 * This ensures we send a periodic full zone update to a client that has started moving after 5 or so minutes.
	 *
	 * For very large zones we will also force a full update based on distance.
	 *
	 * We ignore a small radius around the client so that we don't interrupt NPCs that are already pathing;
	 * otherwise those NPCs would appear to come to a full stop when they are actually still moving.
	 */
float distance_moved = DistanceNoZ(GetLastPositionBeforeBulkUpdate(), GetPosition());
bool moved_far_enough_before_bulk_update = distance_moved >= zone->GetNpcPositionUpdateDistance();
bool is_ready_to_update = (
client_zone_wide_full_position_update_timer.Check() || moved_far_enough_before_bulk_update
);
if (is_client_moving && is_ready_to_update) {
LogDebug("[[{}]] Client Zone Wide Position Update NPCs", GetCleanName());
auto &mob_movement_manager = MobMovementManager::Get();
auto &mob_list = entity_list.GetMobList();
for (auto &it : mob_list) {
Mob *entity = it.second;
if (!entity->IsNPC()) {
continue;
}
int animation_speed = 0;
if (entity->IsMoving()) {
if (entity->IsRunning()) {
animation_speed = (entity->IsFeared() ? entity->GetFearSpeed() : entity->GetRunspeed());
}
else {
animation_speed = entity->GetWalkspeed();
}
}
mob_movement_manager.SendCommandToClients(entity, 0.0, 0.0, 0.0, 0.0, animation_speed, ClientRangeAny, this);
}
SetLastPositionBeforeBulkUpdate(GetPosition());
}
int32 new_animation = ppu->animation;
/* Update internal server position from what the client has sent */
m_Position.x = cx;
m_Position.y = cy;
m_Position.z = cz;
/* Visual Debugging */
if (RuleB(Character, OPClientUpdateVisualDebug)) {
LogDebug("ClientUpdate: ppu x: [{}] y: [{}] z: [{}] h: [{}]", cx, cy, cz, new_heading);
this->SendAppearanceEffect(78, 0, 0, 0, 0);
this->SendAppearanceEffect(41, 0, 0, 0, 0);
}
/* Only feed real time updates when client is moving */
if (is_client_moving || new_heading != m_Position.w || new_animation != animation) {
animation = ppu->animation;
m_Position.w = new_heading;
/* Broadcast update to other clients */
auto outapp = new EQApplicationPacket(OP_ClientUpdate, sizeof(PlayerPositionUpdateServer_Struct));
PlayerPositionUpdateServer_Struct *position_update = (PlayerPositionUpdateServer_Struct *) outapp->pBuffer;
MakeSpawnUpdate(position_update);
if (gm_hide_me) {
entity_list.QueueClientsStatus(this, outapp, true, Admin(), 255);
} else {
entity_list.QueueCloseClients(this, outapp, true, RuleI(Range, ClientPositionUpdates), nullptr, true);
}
/* Always send position updates to group - send when beyond normal ClientPositionUpdate range */
Group *group = this->GetGroup();
Raid *raid = this->GetRaid();
if (raid) {
raid->QueueClients(this, outapp, true, true, (RuleI(Range, ClientPositionUpdates) * -1));
} else if (group) {
group->QueueClients(this, outapp, true, true, (RuleI(Range, ClientPositionUpdates) * -1));
}
safe_delete(outapp);
}
if (zone->watermap) {
if (zone->watermap->InLiquid(glm::vec3(m_Position))) {
CheckIncreaseSkill(EQ::skills::SkillSwimming, nullptr, -17);
// Dismount horses when entering water
if (GetHorseId() && RuleB(Character, DismountWater)) {
SetHorseId(0);
BuffFadeByEffect(SE_SummonHorse);
}
}
CheckRegionTypeChanges();
}
CheckVirtualZoneLines();
}
void Client::Handle_OP_CombatAbility(const EQApplicationPacket *app)
{
if (app->size != sizeof(CombatAbility_Struct)) {
std::cout << "Wrong size on OP_CombatAbility. Got: " << app->size << ", Expected: " << sizeof(CombatAbility_Struct) << std::endl;
return;
}
auto ca_atk = (CombatAbility_Struct *)app->pBuffer;
OPCombatAbility(ca_atk);
return;
}
void Client::Handle_OP_ConfirmDelete(const EQApplicationPacket* app)
{
return;
}
void Client::Handle_OP_Consent(const EQApplicationPacket *app)
{
	if (app->size < 64) {
Consent_Struct* c = (Consent_Struct*)app->pBuffer;
ConsentCorpses(c->name, false);
}
}
void Client::Handle_OP_ConsentDeny(const EQApplicationPacket *app)
{
	if (app->size < 64) {
Consent_Struct* c = (Consent_Struct*)app->pBuffer;
ConsentCorpses(c->name, true);
}
}
void Client::Handle_OP_Consider(const EQApplicationPacket *app)
{
if (app->size != sizeof(Consider_Struct))
{
LogDebug("Size mismatch in Consider expected [{}] got [{}]", sizeof(Consider_Struct), app->size);
return;
}
Consider_Struct* conin = (Consider_Struct*)app->pBuffer;
Mob* tmob = entity_list.GetMob(conin->targetid);
if (tmob == 0)
return;
if (parse->EventPlayer(EVENT_CONSIDER, this, fmt::format("{}", conin->targetid), 0) == 1) {
return;
}
if (tmob->GetClass() == LDON_TREASURE)
{
Message(Chat::Yellow, "%s", tmob->GetCleanName());
return;
}
auto outapp = new EQApplicationPacket(OP_Consider, sizeof(Consider_Struct));
Consider_Struct* con = (Consider_Struct*)outapp->pBuffer;
con->playerid = GetID();
con->targetid = conin->targetid;
if (tmob->IsNPC())
		con->faction = GetFactionLevel(character_id, tmob->GetNPCTypeID(), GetFactionRace(), class_, deity, (tmob->IsNPC()) ? tmob->CastToNPC()->GetPrimaryFaction() : 0, tmob); // Dec. 20, 2001; TODO: Send the player's proper deity
else
con->faction = 1;
con->level = GetLevelCon(tmob->GetLevel());
if (ClientVersion() <= EQ::versions::ClientVersion::Titanium) {
if (con->level == CON_GRAY) {
con->level = CON_GREEN;
}
if (con->level == CON_WHITE) {
con->level = CON_WHITE_TITANIUM;
}
}
if (zone->IsPVPZone()) {
if (!tmob->IsNPC())
con->pvpcon = tmob->CastToClient()->GetPVP();
}
// If we're feigned show NPC as indifferent
if (tmob->IsNPC())
{
if (GetFeigned())
con->faction = FACTION_INDIFFERENT;
}
	if (con->faction != FACTION_SCOWLS)
{
if (tmob->IsNPC())
{
if (tmob->CastToNPC()->IsOnHatelist(this))
con->faction = FACTION_THREATENLY;
}
}
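	// The else-if chain below swaps apprehensive<->scowls and dubious<->threatening,
	// presumably because the server-side faction constants are ordered differently
	// from the values the client expects in the consider packet.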
if (con->faction == FACTION_APPREHENSIVE) {
con->faction = FACTION_SCOWLS;
}
else if (con->faction == FACTION_DUBIOUS) {
con->faction = FACTION_THREATENLY;
}
else if (con->faction == FACTION_SCOWLS) {
con->faction = FACTION_APPREHENSIVE;
}
else if (con->faction == FACTION_THREATENLY) {
con->faction = FACTION_DUBIOUS;
}
mod_consider(tmob, con);
QueuePacket(outapp);
// only wanted to check raid target once
// and need con to still be around so, do it here!
if (tmob->IsRaidTarget()) {
uint32 color = 0;
switch (con->level) {
case CON_GREEN:
color = 2;
break;
case CON_LIGHTBLUE:
color = 10;
break;
case CON_BLUE:
color = 4;
break;
case CON_WHITE_TITANIUM:
case CON_WHITE:
color = 10;
break;
case CON_YELLOW:
color = 15;
break;
case CON_RED:
color = 13;
break;
case CON_GRAY:
color = 6;
break;
}
if (ClientVersion() <= EQ::versions::ClientVersion::Titanium) {
if (color == 6) {
color = 2;
}
}
SendColoredText(color, std::string("This creature would take an army to defeat!"));
}
// this could be done better, but this is only called when you con so w/e
// Shroud of Stealth has a special message
if (improved_hidden && (!tmob->see_improved_hide && (tmob->see_invis || tmob->see_hide)))
MessageString(Chat::NPCQuestSay, SOS_KEEPS_HIDDEN);
// we are trying to hide but they can see us
else if ((invisible || invisible_undead || hidden || invisible_animals) && !IsInvisible(tmob))
MessageString(Chat::NPCQuestSay, SUSPECT_SEES_YOU);
safe_delete(outapp);
return;
}
void Client::Handle_OP_ConsiderCorpse(const EQApplicationPacket *app)
{
if (app->size != sizeof(Consider_Struct))
{
LogDebug("Size mismatch in Consider corpse expected [{}] got [{}]", sizeof(Consider_Struct), app->size);
return;
}
Consider_Struct* conin = (Consider_Struct*)app->pBuffer;
Corpse* tcorpse = entity_list.GetCorpseByID(conin->targetid);
if (tcorpse && tcorpse->IsNPCCorpse()) {
if (parse->EventPlayer(EVENT_CONSIDER_CORPSE, this, fmt::format("{}", conin->targetid), 0) == 1) {
return;
}
uint32 min; uint32 sec; uint32 ttime;
if ((ttime = tcorpse->GetDecayTime()) != 0) {
			sec = (ttime / 1000) % 60; // seconds remainder
			min = (ttime / 60000) % 60; // whole minutes remainder
char val1[20] = { 0 };
char val2[20] = { 0 };
MessageString(Chat::NPCQuestSay, CORPSE_DECAY1, ConvertArray(min, val1), ConvertArray(sec, val2));
}
else {
MessageString(Chat::NPCQuestSay, CORPSE_DECAY_NOW);
}
}
else if (tcorpse && tcorpse->IsPlayerCorpse()) {
if (parse->EventPlayer(EVENT_CONSIDER_CORPSE, this, fmt::format("{}", conin->targetid), 0) == 1) {
return;
}
uint32 day, hour, min, sec, ttime;
if ((ttime = tcorpse->GetDecayTime()) != 0) {
			sec = (ttime / 1000) % 60; // seconds remainder
			min = (ttime / 60000) % 60; // minutes remainder
			hour = (ttime / 3600000) % 24; // hours remainder
			day = ttime / 86400000; // total days
if (day)
Message(0, "This corpse will decay in %i days, %i hours, %i minutes and %i seconds.", day, hour, min, sec);
else if (hour)
Message(0, "This corpse will decay in %i hours, %i minutes and %i seconds.", hour, min, sec);
else
Message(0, "This corpse will decay in %i minutes and %i seconds.", min, sec);
Message(0, "This corpse %s be resurrected.", tcorpse->IsRezzed() ? "cannot" : "can");
}
else {
MessageString(Chat::NPCQuestSay, CORPSE_DECAY_NOW);
}
}
}
void Client::Handle_OP_Consume(const EQApplicationPacket *app)
{
if (app->size != sizeof(Consume_Struct))
{
LogError("OP size error: OP_Consume expected:[{}] got:[{}]", sizeof(Consume_Struct), app->size);
return;
}
Consume_Struct* pcs = (Consume_Struct*)app->pBuffer;
if (pcs->type == 0x01)
{
if (m_pp.hunger_level > 6000)
{
EQApplicationPacket *outapp = nullptr;
outapp = new EQApplicationPacket(OP_Stamina, sizeof(Stamina_Struct));
Stamina_Struct* sta = (Stamina_Struct*)outapp->pBuffer;
sta->food = m_pp.hunger_level > 6000 ? 6000 : m_pp.hunger_level;
sta->water = m_pp.thirst_level > 6000 ? 6000 : m_pp.thirst_level;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
}
else if (pcs->type == 0x02)
{
if (m_pp.thirst_level > 6000)
{
EQApplicationPacket *outapp = nullptr;
outapp = new EQApplicationPacket(OP_Stamina, sizeof(Stamina_Struct));
Stamina_Struct* sta = (Stamina_Struct*)outapp->pBuffer;
sta->food = m_pp.hunger_level > 6000 ? 6000 : m_pp.hunger_level;
sta->water = m_pp.thirst_level > 6000 ? 6000 : m_pp.thirst_level;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
}
EQ::ItemInstance *myitem = GetInv().GetItem(pcs->slot);
if (myitem == nullptr) {
LogError("Consuming from empty slot [{}]", pcs->slot);
return;
}
const EQ::ItemData* eat_item = myitem->GetItem();
if (pcs->type == 0x01) {
Consume(eat_item, EQ::item::ItemTypeFood, pcs->slot, (pcs->auto_consumed == 0xffffffff));
}
else if (pcs->type == 0x02) {
Consume(eat_item, EQ::item::ItemTypeDrink, pcs->slot, (pcs->auto_consumed == 0xffffffff));
}
else {
LogError("OP_Consume: unknown type, type:[{}]", (int)pcs->type);
return;
}
if (m_pp.hunger_level > 50000)
m_pp.hunger_level = 50000;
if (m_pp.thirst_level > 50000)
m_pp.thirst_level = 50000;
EQApplicationPacket *outapp = nullptr;
outapp = new EQApplicationPacket(OP_Stamina, sizeof(Stamina_Struct));
Stamina_Struct* sta = (Stamina_Struct*)outapp->pBuffer;
sta->food = m_pp.hunger_level > 6000 ? 6000 : m_pp.hunger_level;
sta->water = m_pp.thirst_level > 6000 ? 6000 : m_pp.thirst_level;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
void Client::Handle_OP_ControlBoat(const EQApplicationPacket *app)
{
if (app->size != sizeof(ControlBoat_Struct)) {
LogError("Wrong size: OP_ControlBoat, size=[{}], expected [{}]", app->size, sizeof(ControlBoat_Struct));
return;
}
ControlBoat_Struct* cbs = (ControlBoat_Struct*)app->pBuffer;
Mob* boat = entity_list.GetMob(cbs->boatId);
if (!boat) {
LogError("Player tried to take control of non-existent boat (char_id: %u, boat_eid: %u)", CharacterID(), cbs->boatId);
return; // do nothing if the boat isn't valid
}
if (!boat->IsNPC() || !boat->IsControllableBoat())
{
		auto hacked_string = fmt::format("OP_ControlBoat was sent against {} which is of race {}", boat->GetName(), boat->GetRace());
database.SetMQDetectionFlag(this->AccountName(), this->GetName(), hacked_string, zone->GetShortName());
return;
}
if (cbs->TakeControl) {
// this uses the boat's target to indicate who has control of it. It has to check hate to make sure the boat isn't actually attacking anyone.
if (!boat->GetTarget() || (boat->GetTarget() == this && boat->GetHateAmount(this) == 0)) {
boat->SetTarget(this);
}
else {
this->MessageString(Chat::Red, IN_USE);
return;
}
}
else {
if (boat->GetTarget() == this) {
boat->SetTarget(nullptr);
}
}
// client responds better to a packet echo than an empty op
QueuePacket(app);
// have the boat signal itself, so quests can be triggered by boat use
boat->CastToNPC()->SignalNPC(0);
}
void Client::Handle_OP_CorpseDrag(const EQApplicationPacket *app)
{
if (DraggedCorpses.size() >= (unsigned int)RuleI(Character, MaxDraggedCorpses))
{
MessageString(Chat::Red, CORPSEDRAG_LIMIT);
return;
}
VERIFY_PACKET_LENGTH(OP_CorpseDrag, app, CorpseDrag_Struct);
CorpseDrag_Struct *cds = (CorpseDrag_Struct*)app->pBuffer;
Mob* corpse = entity_list.GetMob(cds->CorpseName);
if (!corpse || !corpse->IsPlayerCorpse() || corpse->CastToCorpse()->IsBeingLooted())
return;
Client *c = entity_list.FindCorpseDragger(corpse->GetID());
if (c)
{
if (c == this)
MessageString(Chat::DefaultText, CORPSEDRAG_ALREADY, corpse->GetCleanName());
else
MessageString(Chat::DefaultText, CORPSEDRAG_SOMEONE_ELSE, corpse->GetCleanName());
return;
}
if (!corpse->CastToCorpse()->Summon(this, false, true))
return;
DraggedCorpses.push_back(std::pair<std::string, uint16>(cds->CorpseName, corpse->GetID()));
MessageString(Chat::DefaultText, CORPSEDRAG_BEGIN, cds->CorpseName);
}
void Client::Handle_OP_CorpseDrop(const EQApplicationPacket *app)
{
if (app->size == 1)
{
MessageString(Chat::DefaultText, CORPSEDRAG_STOPALL);
ClearDraggedCorpses();
return;
}
for (auto Iterator = DraggedCorpses.begin(); Iterator != DraggedCorpses.end(); ++Iterator)
{
if (!strcasecmp(Iterator->first.c_str(), (const char *)app->pBuffer))
{
MessageString(Chat::DefaultText, CORPSEDRAG_STOP);
Iterator = DraggedCorpses.erase(Iterator);
return;
}
}
}
void Client::Handle_OP_CrashDump(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_CreateObject(const EQApplicationPacket *app)
{
if (LogSys.log_settings[Logs::Inventory].is_category_enabled)
LogInventory("Handle_OP_CreateObject() [psize: [{}]] [{}]", app->size, DumpPacketToString(app).c_str());
DropItem(EQ::invslot::slotCursor);
return;
}
void Client::Handle_OP_CrystalCreate(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_CrystalCreate, app, CrystalReclaim_Struct);
CrystalReclaim_Struct *cr = (CrystalReclaim_Struct*)app->pBuffer;
const uint32 requestQty = cr->amount;
const bool isRadiant = cr->type == 4;
const bool isEbon = cr->type == 5;
// Check: Valid type requested.
if (!isRadiant && !isEbon) {
return;
}
// Check: Valid quantity requested.
if (requestQty < 1) {
return;
}
// Check: Valid client state to make request.
// In this situation the client is either desynced or attempting an exploit.
const uint32 currentQty = isRadiant ? GetRadiantCrystals() : GetEbonCrystals();
if (currentQty == 0) {
return;
}
// Prevent the client from creating more than they have.
const uint32 amount = EQ::ClampUpper(requestQty, currentQty);
const uint32 itemID = isRadiant ? RuleI(Zone, RadiantCrystalItemID) : RuleI(Zone, EbonCrystalItemID);
// Summon crystals for player.
const bool success = SummonItem(itemID, amount);
if (!success) {
return;
}
// Deduct crystals from client and update them.
if (isRadiant) {
m_pp.currentRadCrystals -= amount;
m_pp.careerRadCrystals -= amount;
}
else if (isEbon) {
m_pp.currentEbonCrystals -= amount;
m_pp.careerEbonCrystals -= amount;
}
SaveCurrency();
SendCrystalCounts();
}
void Client::Handle_OP_CrystalReclaim(const EQApplicationPacket *app)
{
uint32 ebon = NukeItem(RuleI(Zone, EbonCrystalItemID), invWhereWorn | invWherePersonal | invWhereCursor);
uint32 radiant = NukeItem(RuleI(Zone, RadiantCrystalItemID), invWhereWorn | invWherePersonal | invWhereCursor);
if ((ebon + radiant) > 0) {
AddCrystals(radiant, ebon);
}
}
void Client::Handle_OP_Damage(const EQApplicationPacket *app)
{
if (app->size != sizeof(CombatDamage_Struct)) {
LogError("Received invalid sized OP_Damage: got [{}], expected [{}]", app->size, sizeof(CombatDamage_Struct));
DumpPacket(app);
return;
}
// Broadcast to other clients
CombatDamage_Struct* damage = (CombatDamage_Struct*)app->pBuffer;
	// don't echo falling damage packets back to the originator
entity_list.QueueClients(this, app, (damage->type == DamageTypeFalling));
return;
}
void Client::Handle_OP_Death(const EQApplicationPacket *app)
{
if (app->size != sizeof(Death_Struct))
return;
Death_Struct* ds = (Death_Struct*)app->pBuffer;
//I think this attack_skill value is really a value from SkillDamageTypes...
if (ds->attack_skill > EQ::skills::HIGHEST_SKILL) {
return;
}
if (GetHP() > 0)
return;
Mob* killer = entity_list.GetMob(ds->killer_id);
Death(killer, ds->damage, ds->spell_id, (EQ::skills::SkillType)ds->attack_skill);
return;
}
void Client::Handle_OP_DelegateAbility(const EQApplicationPacket *app)
{
if (app->size != sizeof(DelegateAbility_Struct))
{
LogDebug("Size mismatch in OP_DelegateAbility expected [{}] got [{}]", sizeof(DelegateAbility_Struct), app->size);
DumpPacket(app);
return;
}
DelegateAbility_Struct* das = (DelegateAbility_Struct*)app->pBuffer;
Group *g = GetGroup();
if (!g) return;
switch (das->DelegateAbility)
{
case 0:
{
g->DelegateMainAssist(das->Name);
break;
}
case 1:
{
g->DelegateMarkNPC(das->Name);
break;
}
case 2:
{
g->DelegateMainTank(das->Name);
break;
}
case 3:
{
g->DelegatePuller(das->Name);
break;
}
default:
break;
}
}
void Client::Handle_OP_DeleteItem(const EQApplicationPacket *app)
{
if (app->size != sizeof(DeleteItem_Struct)) {
std::cout << "Wrong size on OP_DeleteItem. Got: " << app->size << ", Expected: " << sizeof(DeleteItem_Struct) << std::endl;
return;
}
DeleteItem_Struct* alc = (DeleteItem_Struct*)app->pBuffer;
const EQ::ItemInstance *inst = GetInv().GetItem(alc->from_slot);
if (inst && inst->GetItem()->ItemType == EQ::item::ItemTypeAlcohol) {
entity_list.MessageCloseString(this, true, 50, 0, DRINKING_MESSAGE, GetName(), inst->GetItem()->Name);
CheckIncreaseSkill(EQ::skills::SkillAlcoholTolerance, nullptr, 25);
int16 AlcoholTolerance = GetSkill(EQ::skills::SkillAlcoholTolerance);
int16 IntoxicationIncrease;
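		// Both formulas scale from +40 intoxication at 0 skill down to +10 at the cap:
		// pre-SoD: (200 - tolerance) * 30 / 200 + 10; SoD+: (270 - tolerance) / 9 + 10
		// (0.111111108 is approximately 1/9).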
if (ClientVersion() < EQ::versions::ClientVersion::SoD)
IntoxicationIncrease = (200 - AlcoholTolerance) * 30 / 200 + 10;
else
IntoxicationIncrease = (270 - AlcoholTolerance) * 0.111111108 + 10;
if (IntoxicationIncrease < 0)
IntoxicationIncrease = 1;
m_pp.intoxication += IntoxicationIncrease;
if (m_pp.intoxication > 200)
m_pp.intoxication = 200;
}
DeleteItemInInventory(alc->from_slot, 1);
return;
}
void Client::Handle_OP_DeleteSpawn(const EQApplicationPacket *app)
{
// The client will send this with his id when he zones, maybe when he disconnects too?
//eqs->RemoveData(); // Flushing the queue of packet data to allow for proper zoning
//just make sure this gets out
auto outapp = new EQApplicationPacket(OP_LogoutReply);
FastQueuePacket(&outapp);
outapp = new EQApplicationPacket(OP_DeleteSpawn, sizeof(EntityId_Struct));
EntityId_Struct* eid = (EntityId_Struct*)outapp->pBuffer;
eid->entity_id = GetID();
entity_list.QueueClients(this, outapp, false);
safe_delete(outapp);
hate_list.RemoveEntFromHateList(this->CastToMob());
Disconnect();
return;
}
void Client::Handle_OP_Disarm(const EQApplicationPacket *app) {
if (dead || bZoning) return;
if (!HasSkill(EQ::skills::SkillDisarm))
return;
if (app->size != sizeof(Disarm_Struct)) {
LogSkills("Size mismatch for Disarm_Struct packet");
return;
}
Disarm_Struct *disarm = (Disarm_Struct *)app->pBuffer;
if (!p_timers.Expired(&database, pTimerCombatAbility2, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerCombatAbility2, 8);
BreakInvis();
Mob* pmob = entity_list.GetMob(disarm->source);
Mob* tmob = entity_list.GetMob(disarm->target);
if (!pmob || !tmob)
return;
if (pmob->GetID() != GetID()) {
// Client sent a disarm request with an originator ID not matching their own ID.
auto hack_str = fmt::format("Player {} ({}) sent OP_Disarm with source ID of: {}", GetCleanName(), GetID(), pmob->GetID());
database.SetMQDetectionFlag(this->account_name, this->name, hack_str, zone->GetShortName());
return;
}
// No disarm on corpses
if (tmob->IsCorpse())
return;
// No target
if (!GetTarget())
return;
// Targets don't match (possible hack, but not flagging)
if (GetTarget() != tmob) {
return;
}
// Too far away
if (pmob->CalculateDistance(GetTarget()->GetX(), GetTarget()->GetY(), GetTarget()->GetZ()) > 400)
return;
// Can't see mob
//if (tmob->BehindMob(pmob))
// return;
// How can we disarm someone if we are feigned.
if (GetFeigned())
return;
// We can't disarm someone who is feigned.
if (tmob->IsClient() && tmob->CastToClient()->GetFeigned())
return;
if (GetTarget() == tmob && pmob == this->CastToMob() &&
disarm->skill == GetSkill(EQ::skills::SkillDisarm) && IsAttackAllowed(tmob)) {
int p_level = pmob->GetLevel() ? pmob->GetLevel() : 1;
int t_level = tmob->GetLevel() ? tmob->GetLevel() : 1;
		// We have a disarmable target - success or fail, we always aggro the mob
if (tmob->IsNPC()) {
if (!tmob->CheckAggro(pmob)) {
zone->AddAggroMob();
tmob->AddToHateList(pmob, p_level);
}
else {
tmob->AddToHateList(pmob, p_level / 3);
}
}
int chance = GetSkill(EQ::skills::SkillDisarm); // (1% @ 0 skill) (11% @ 200 skill) - against even con
chance /= 2;
chance += 10;
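		// chance appears to be expressed in tenths of a percent, matching the
		// comment above: skill/2 + 10 gives 10 (1%) at 0 skill and 110 (11%) at 200.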
// Modify chance based on level difference
		// cast to float so the level ratio isn't truncated by integer division
		float lvl_mod = static_cast<float>(p_level) / static_cast<float>(t_level);
		chance *= lvl_mod;
if (chance > 300)
chance = 300; // max chance of 30%
if (tmob->IsNPC()) {
tmob->CastToNPC()->Disarm(this, chance);
}
else if (tmob->IsClient()) {
tmob->CastToClient()->Disarm(this, chance);
}
return;
}
// Trying to disarm something we can't disarm
MessageString(Chat::Skills, DISARM_NO_TARGET);
return;
}
void Client::Handle_OP_DeleteSpell(const EQApplicationPacket *app)
{
if (app->size != sizeof(DeleteSpell_Struct))
return;
EQApplicationPacket* outapp = app->Copy();
DeleteSpell_Struct* dss = (DeleteSpell_Struct*)outapp->pBuffer;
if (dss->spell_slot < 0 || dss->spell_slot >= EQ::spells::DynamicLookup(ClientVersion(), GetGM())->SpellbookSize)
return;
if (m_pp.spell_book[dss->spell_slot] != SPELLBOOK_UNKNOWN) {
		// delete from the database before clearing the slot, so the real spell id
		// (rather than SPELLBOOK_UNKNOWN) is passed to DeleteCharacterSpell
		database.DeleteCharacterSpell(this->CharacterID(), m_pp.spell_book[dss->spell_slot], dss->spell_slot);
		m_pp.spell_book[dss->spell_slot] = SPELLBOOK_UNKNOWN;
dss->success = 1;
}
else
dss->success = 0;
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_DisarmTraps(const EQApplicationPacket *app)
{
if (!HasSkill(EQ::skills::SkillDisarmTraps))
return;
if (!p_timers.Expired(&database, pTimerDisarmTraps, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
int reuse = DisarmTrapsReuseTime - GetSkillReuseTime(EQ::skills::SkillDisarmTraps);
if (reuse < 1)
reuse = 1;
p_timers.Start(pTimerDisarmTraps, reuse - 1);
uint8 success = SKILLUP_FAILURE;
float curdist = 0;
Trap* trap = entity_list.FindNearbyTrap(this, 250, curdist, true);
if (trap && trap->detected)
{
float max_radius = (trap->radius * 2) * (trap->radius * 2); // radius is used to trigger trap, so disarm radius should be a bit bigger.
Log(Logs::General, Logs::Traps, "%s is attempting to disarm trap %d. Curdist is %0.2f maxdist is %0.2f", GetName(), trap->trap_id, curdist, max_radius);
if (curdist <= max_radius)
{
int uskill = GetSkill(EQ::skills::SkillDisarmTraps);
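			// Opposed roll: d50 + player skill vs d50 + trap skill; ties go to the player.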
if ((zone->random.Int(0, 49) + uskill) >= (zone->random.Int(0, 49) + trap->skill))
{
success = SKILLUP_SUCCESS;
MessageString(Chat::Skills, DISARMED_TRAP);
trap->disarmed = true;
Log(Logs::General, Logs::Traps, "Trap %d is disarmed.", trap->trap_id);
trap->UpdateTrap();
}
else
{
MessageString(Chat::Skills, FAIL_DISARM_DETECTED_TRAP);
if (zone->random.Int(0, 99) < 25) {
trap->Trigger(this);
}
}
CheckIncreaseSkill(EQ::skills::SkillDisarmTraps, nullptr);
return;
}
else
{
MessageString(Chat::Skills, TRAP_TOO_FAR);
}
}
else
{
MessageString(Chat::Skills, LDON_SENSE_TRAP2);
}
return;
}
void Client::Handle_OP_DoGroupLeadershipAbility(const EQApplicationPacket *app)
{
if (app->size != sizeof(DoGroupLeadershipAbility_Struct)) {
LogDebug("Size mismatch in OP_DoGroupLeadershipAbility expected [{}] got [{}]", sizeof(DoGroupLeadershipAbility_Struct), app->size);
DumpPacket(app);
return;
}
DoGroupLeadershipAbility_Struct* dglas = (DoGroupLeadershipAbility_Struct*)app->pBuffer;
switch (dglas->Ability)
{
case GroupLeadershipAbility_MarkNPC:
{
if (GetTarget())
{
Group* g = GetGroup();
if (g)
g->MarkNPC(GetTarget(), dglas->Parameter);
}
break;
}
case groupAAInspectBuffs:
{
Mob *Target = GetTarget();
if (!Target || !Target->IsClient())
return;
if (IsRaidGrouped()) {
Raid *raid = GetRaid();
if (!raid)
return;
uint32 group_id = raid->GetGroup(this);
if (group_id > 11 || raid->GroupCount(group_id) < 3)
return;
Target->CastToClient()->InspectBuffs(this, raid->GetLeadershipAA(groupAAInspectBuffs, group_id));
return;
}
Group *g = GetGroup();
if (!g || (g->GroupCount() < 3))
return;
Target->CastToClient()->InspectBuffs(this, g->GetLeadershipAA(groupAAInspectBuffs));
break;
}
default:
LogDebug("Got unhandled OP_DoGroupLeadershipAbility Ability: [{}] Parameter: [{}]", dglas->Ability, dglas->Parameter);
break;
}
}
void Client::Handle_OP_DuelResponse(const EQApplicationPacket *app)
{
if (app->size != sizeof(DuelResponse_Struct))
return;
DuelResponse_Struct* ds = (DuelResponse_Struct*)app->pBuffer;
Entity* entity = entity_list.GetID(ds->target_id);
Entity* initiator = entity_list.GetID(ds->entity_id);
	if (!entity || !initiator || !entity->IsClient() || !initiator->IsClient())
return;
entity->CastToClient()->SetDuelTarget(0);
entity->CastToClient()->SetDueling(false);
initiator->CastToClient()->SetDuelTarget(0);
initiator->CastToClient()->SetDueling(false);
if (GetID() == initiator->GetID())
entity->CastToClient()->MessageString(Chat::NPCQuestSay, DUEL_DECLINE, initiator->GetName());
else
initiator->CastToClient()->MessageString(Chat::NPCQuestSay, DUEL_DECLINE, entity->GetName());
return;
}
void Client::Handle_OP_DuelResponse2(const EQApplicationPacket *app)
{
if (app->size != sizeof(Duel_Struct))
return;
Duel_Struct* ds = (Duel_Struct*)app->pBuffer;
Entity* entity = entity_list.GetID(ds->duel_target);
Entity* initiator = entity_list.GetID(ds->duel_initiator);
if (entity && initiator && entity == this && initiator->IsClient()) {
auto outapp = new EQApplicationPacket(OP_RequestDuel, sizeof(Duel_Struct));
Duel_Struct* ds2 = (Duel_Struct*)outapp->pBuffer;
ds2->duel_initiator = entity->GetID();
ds2->duel_target = entity->GetID();
initiator->CastToClient()->QueuePacket(outapp);
outapp->SetOpcode(OP_DuelResponse2);
ds2->duel_initiator = initiator->GetID();
initiator->CastToClient()->QueuePacket(outapp);
QueuePacket(outapp);
SetDueling(true);
initiator->CastToClient()->SetDueling(true);
SetDuelTarget(ds->duel_initiator);
safe_delete(outapp);
if (IsCasting())
InterruptSpell();
if (initiator->CastToClient()->IsCasting())
initiator->CastToClient()->InterruptSpell();
}
return;
}
void Client::Handle_OP_DumpName(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_Dye(const EQApplicationPacket *app)
{
if (app->size != sizeof(EQ::TintProfile))
printf("Wrong size of DyeStruct, Got: %i, Expected: %zu\n", app->size, sizeof(EQ::TintProfile));
else {
EQ::TintProfile* dye = (EQ::TintProfile*)app->pBuffer;
DyeArmor(dye);
}
return;
}
void Client::Handle_OP_DzAddPlayer(const EQApplicationPacket *app)
{
auto expedition = GetExpedition();
if (expedition)
{
auto dzcmd = reinterpret_cast<ExpeditionCommand_Struct*>(app->pBuffer);
expedition->DzAddPlayer(this, dzcmd->name);
}
else
{
// the only /dz command that sends an error message if no active expedition
Message(Chat::System, DZ_YOU_NOT_ASSIGNED);
}
}
void Client::Handle_OP_DzChooseZoneReply(const EQApplicationPacket *app)
{
auto dzmsg = reinterpret_cast<DynamicZoneChooseZoneReply_Struct*>(app->pBuffer);
LogDynamicZones("Character [{}] chose DynamicZone [{}]:[{}] type: [{}] with system id: [{}]",
CharacterID(), dzmsg->dz_zone_id, dzmsg->dz_instance_id, dzmsg->dz_type, dzmsg->unknown_id2);
if (!dzmsg->dz_instance_id || !database.VerifyInstanceAlive(dzmsg->dz_instance_id, CharacterID()))
{
// live just no-ops this without a message
LogDynamicZones("Character [{}] chose invalid DynamicZone [{}]:[{}] or is no longer a member",
CharacterID(), dzmsg->dz_zone_id, dzmsg->dz_instance_id);
return;
}
auto client_dzs = GetDynamicZones();
auto it = std::find_if(client_dzs.begin(), client_dzs.end(), [&](const DynamicZone* dz) {
return dz->IsSameDz(dzmsg->dz_zone_id, dzmsg->dz_instance_id); });
if (it != client_dzs.end())
{
DynamicZoneLocation loc = (*it)->GetZoneInLocation();
ZoneMode zone_mode = (*it)->HasZoneInLocation() ? ZoneMode::ZoneSolicited : ZoneMode::ZoneToSafeCoords;
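		// Zone to the dynamic zone's defined zone-in location when it has one;
		// otherwise fall back to the target zone's safe coordinates.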
MovePC(dzmsg->dz_zone_id, dzmsg->dz_instance_id, loc.x, loc.y, loc.z, loc.heading, 0, zone_mode);
}
}
void Client::Handle_OP_DzExpeditionInviteResponse(const EQApplicationPacket *app)
{
auto expedition = Expedition::FindCachedExpeditionByID(m_pending_expedition_invite.expedition_id);
std::string swap_remove_name = m_pending_expedition_invite.swap_remove_name;
m_pending_expedition_invite = { 0 }; // clear before re-validating
if (expedition)
{
auto dzmsg = reinterpret_cast<ExpeditionInviteResponse_Struct*>(app->pBuffer);
expedition->DzInviteResponse(this, dzmsg->accepted, swap_remove_name);
}
}
void Client::Handle_OP_DzListTimers(const EQApplicationPacket *app)
{
DzListTimers();
}
void Client::Handle_OP_DzMakeLeader(const EQApplicationPacket *app)
{
auto expedition = GetExpedition();
if (expedition)
{
auto dzcmd = reinterpret_cast<ExpeditionCommand_Struct*>(app->pBuffer);
expedition->DzMakeLeader(this, dzcmd->name);
}
}
void Client::Handle_OP_DzPlayerList(const EQApplicationPacket *app)
{
auto expedition = GetExpedition();
if (expedition) {
expedition->DzPlayerList(this);
}
}
void Client::Handle_OP_DzRemovePlayer(const EQApplicationPacket *app)
{
auto expedition = GetExpedition();
if (expedition)
{
auto dzcmd = reinterpret_cast<ExpeditionCommand_Struct*>(app->pBuffer);
expedition->DzRemovePlayer(this, dzcmd->name);
}
}
void Client::Handle_OP_DzSwapPlayer(const EQApplicationPacket *app)
{
auto expedition = GetExpedition();
if (expedition)
{
auto dzcmd = reinterpret_cast<ExpeditionCommandSwap_Struct*>(app->pBuffer);
expedition->DzSwapPlayer(this, dzcmd->rem_player_name, dzcmd->add_player_name);
}
}
void Client::Handle_OP_DzQuit(const EQApplicationPacket *app)
{
auto expedition = GetExpedition();
if (expedition) {
expedition->DzQuit(this);
}
}
void Client::Handle_OP_Emote(const EQApplicationPacket *app)
{
if (app->size != sizeof(Emote_Struct)) {
LogError("Received invalid sized OP_Emote: got [{}], expected [{}]", app->size, sizeof(Emote_Struct));
DumpPacket(app);
return;
}
// Calculate new packet dimensions
Emote_Struct* in = (Emote_Struct*)app->pBuffer;
in->message[1023] = '\0';
const char* name = GetName();
uint32 len_name = strlen(name);
uint32 len_msg = strlen(in->message);
// crash protection -- cheater
if (len_msg > 512) {
in->message[512] = '\0';
len_msg = 512;
}
uint32 len_packet = sizeof(in->type) + len_name
+ len_msg + 1;
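	// Outgoing buffer layout: the type field, then the sender's name immediately
	// followed by the original message text, plus one byte for the null terminator.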
// Construct outgoing packet
auto outapp = new EQApplicationPacket(OP_Emote, len_packet);
Emote_Struct* out = (Emote_Struct*)outapp->pBuffer;
out->type = in->type;
memcpy(out->message, name, len_name);
memcpy(&out->message[len_name], in->message, len_msg);
/*
if (target && target->IsClient()) {
entity_list.QueueCloseClients(this, outapp, false, 100, target);
cptr = outapp->pBuffer + 2;
// not sure if live does this or not. thought it was a nice feature, but would take a lot to
// clean up grammatical and other errors. Maybe with a regex parser...
replacestr((char *)cptr, target->GetName(), "you");
replacestr((char *)cptr, " he", " you");
replacestr((char *)cptr, " she", " you");
replacestr((char *)cptr, " him", " you");
replacestr((char *)cptr, " her", " you");
target->CastToClient()->QueuePacket(outapp);
}
else
*/
entity_list.QueueCloseClients(this, outapp, true, RuleI(Range, Emote), 0, true, FilterSocials);
safe_delete(outapp);
return;
}
void Client::Handle_OP_EndLootRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
std::cout << "Wrong size: OP_EndLootRequest, size=" << app->size << ", expected " << sizeof(uint32) << std::endl;
return;
}
SetLooting(0);
Entity* entity = entity_list.GetID(*((uint16*)app->pBuffer));
if (entity == 0) {
Message(Chat::Red, "Error: OP_EndLootRequest: Corpse not found (ent = 0)");
if (ClientVersion() >= EQ::versions::ClientVersion::SoD)
Corpse::SendEndLootErrorPacket(this);
else
Corpse::SendLootReqErrorPacket(this);
return;
}
else if (!entity->IsCorpse()) {
Message(Chat::Red, "Error: OP_EndLootRequest: Corpse not found (!entity->IsCorpse())");
Corpse::SendLootReqErrorPacket(this);
return;
}
else {
entity->CastToCorpse()->EndLoot(this, app);
}
return;
}
void Client::Handle_OP_EnvDamage(const EQApplicationPacket *app)
{
if (!ClientFinishedLoading())
{
SetHP(GetHP() - 1);
return;
}
if (app->size != sizeof(EnvDamage2_Struct)) {
LogError("Received invalid sized OP_EnvDamage: got [{}], expected [{}]", app->size, sizeof(EnvDamage2_Struct));
DumpPacket(app);
return;
}
EnvDamage2_Struct* ed = (EnvDamage2_Struct*)app->pBuffer;
int damage = ed->damage;
if (ed->dmgtype == 252) {
int mod = spellbonuses.ReduceFallDamage + itembonuses.ReduceFallDamage + aabonuses.ReduceFallDamage;
damage -= damage * mod / 100;
}
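	// A negative damage value from the client is almost certainly tampering;
	// 31337 looks like a deliberate punitive value rather than a real calculation.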
if (damage < 0)
damage = 31337;
if (admin >= minStatusToAvoidFalling && GetGM()) {
Message(Chat::Red, "Your GM status protects you from %i points of type %i environmental damage.", ed->damage, ed->dmgtype);
SetHP(GetHP() - 1);//needed or else the client wont acknowledge
return;
}
else if (GetInvul()) {
Message(Chat::Red, "Your invuln status protects you from %i points of type %i environmental damage.", ed->damage, ed->dmgtype);
SetHP(GetHP() - 1);//needed or else the client wont acknowledge
return;
}
else if (zone->GetZoneID() == 183 || zone->GetZoneID() == 184) {
// Hard coded tutorial and load zones for no fall damage
return;
}
else {
		/* EVENT_ENVIRONMENTAL_DAMAGE */
		int final_damage = (damage * RuleR(Character, EnvironmentDamageMulipliter));
		SetHP(GetHP() - final_damage);
char buf[24];
snprintf(buf, 23, "%u %u %i", ed->damage, ed->dmgtype, final_damage);
parse->EventPlayer(EVENT_ENVIRONMENTAL_DAMAGE, this, buf, 0);
}
if (GetHP() <= 0) {
mod_client_death_env();
Death(0, 32000, SPELL_UNKNOWN, EQ::skills::SkillHandtoHand);
}
SendHPUpdate();
return;
}
void Client::Handle_OP_FaceChange(const EQApplicationPacket *app)
{
if (app->size != sizeof(FaceChange_Struct)) {
LogError("Invalid size for OP_FaceChange: Expected: [{}], Got: [{}]",
sizeof(FaceChange_Struct), app->size);
return;
}
// Notify other clients in zone
entity_list.QueueClients(this, app, false);
FaceChange_Struct* fc = (FaceChange_Struct*)app->pBuffer;
m_pp.haircolor = fc->haircolor;
m_pp.beardcolor = fc->beardcolor;
m_pp.eyecolor1 = fc->eyecolor1;
m_pp.eyecolor2 = fc->eyecolor2;
m_pp.hairstyle = fc->hairstyle;
m_pp.face = fc->face;
m_pp.beard = fc->beard;
m_pp.drakkin_heritage = fc->drakkin_heritage;
m_pp.drakkin_tattoo = fc->drakkin_tattoo;
m_pp.drakkin_details = fc->drakkin_details;
Save();
MessageString(Chat::Red, FACE_ACCEPTED);
//Message(Chat::Red, "Facial features updated.");
return;
}
void Client::Handle_OP_FeignDeath(const EQApplicationPacket *app)
{
if (GetClass() != MONK)
return;
if (!p_timers.Expired(&database, pTimerFeignDeath, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
int reuse = FeignDeathReuseTime;
reuse -= GetSkillReuseTime(EQ::skills::SkillFeignDeath);
if (reuse < 1)
reuse = 1;
p_timers.Start(pTimerFeignDeath, reuse - 1);
//BreakInvis();
uint16 primfeign = GetSkill(EQ::skills::SkillFeignDeath);
uint16 secfeign = GetSkill(EQ::skills::SkillFeignDeath);
if (primfeign > 100) {
primfeign = 100;
secfeign = secfeign - 100;
secfeign = secfeign / 2;
}
else
secfeign = 0;
uint16 totalfeign = primfeign + secfeign;
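	// e.g. at 200 skill: primary caps at 100, secondary is (200 - 100) / 2 = 50,
	// so totalfeign = 150 and the 0-160 roll fails roughly 6% of the time.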
if (zone->random.Real(0, 160) > totalfeign) {
SetFeigned(false);
entity_list.MessageCloseString(this, false, 200, 10, STRING_FEIGNFAILED, GetName());
}
else {
SetFeigned(true);
}
CheckIncreaseSkill(EQ::skills::SkillFeignDeath, nullptr, 5);
return;
}
void Client::Handle_OP_FindPersonRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(FindPersonRequest_Struct))
printf("Error in FindPersonRequest_Struct. Expected size of: %zu, but got: %i\n", sizeof(FindPersonRequest_Struct), app->size);
else {
FindPersonRequest_Struct* t = (FindPersonRequest_Struct*)app->pBuffer;
std::vector<FindPerson_Point> points;
Mob* target = entity_list.GetMob(t->npc_id);
if (target == nullptr) {
//empty length packet == not found.
EQApplicationPacket outapp(OP_FindPersonReply, 0);
QueuePacket(&outapp);
return;
}
if (!RuleB(Pathing, Find) && RuleB(Bazaar, EnableWarpToTrader) && target->IsClient() && (target->CastToClient()->Trader ||
target->CastToClient()->Buyer)) {
Message(Chat::Yellow, "Moving you to Trader %s", target->GetName());
MovePC(zone->GetZoneID(), zone->GetInstanceID(), target->GetX(), target->GetY(), target->GetZ(), 0.0f);
}
if (!RuleB(Pathing, Find) || !zone->pathing)
{
//fill in the path array...
//
points.clear();
FindPerson_Point a;
FindPerson_Point b;
a.x = GetX();
a.y = GetY();
a.z = GetZ();
b.x = target->GetX();
b.y = target->GetY();
b.z = target->GetZ();
points.push_back(a);
points.push_back(b);
}
else
{
glm::vec3 Start(GetX(), GetY(), GetZ() + (GetSize() < 6.0 ? 6 : GetSize()) * HEAD_POSITION);
glm::vec3 End(target->GetX(), target->GetY(), target->GetZ() + (target->GetSize() < 6.0 ? 6 : target->GetSize()) * HEAD_POSITION);
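			// Both endpoints above are raised toward head height (HEAD_POSITION scaled
			// by model size, minimum 6) so the route starts at eye level, not the floor.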
bool partial = false;
bool stuck = false;
auto pathlist = zone->pathing->FindRoute(Start, End, partial, stuck);
if (pathlist.empty() || partial)
{
EQApplicationPacket outapp(OP_FindPersonReply, 0);
QueuePacket(&outapp);
return;
}
// Live appears to send the points in this order:
// Final destination.
// Current Position.
// rest of the points.
FindPerson_Point p;
int PointNumber = 0;
bool LeadsToTeleporter = false;
auto v = pathlist.back();
p.x = v.pos.x;
p.y = v.pos.y;
p.z = v.pos.z;
points.push_back(p);
p.x = GetX();
p.y = GetY();
p.z = GetZ();
points.push_back(p);
for (auto Iterator = pathlist.begin(); Iterator != pathlist.end(); ++Iterator)
{
if ((*Iterator).teleport) // Teleporter
{
LeadsToTeleporter = true;
break;
}
glm::vec3 v = (*Iterator).pos;
p.x = v.x;
p.y = v.y;
p.z = v.z;
points.push_back(p);
++PointNumber;
}
if (!LeadsToTeleporter)
{
p.x = target->GetX();
p.y = target->GetY();
p.z = target->GetZ();
points.push_back(p);
}
}
SendPathPacket(points);
}
}
void Client::Handle_OP_Fishing(const EQApplicationPacket *app)
{
if (!p_timers.Expired(&database, pTimerFishing, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
if (CanFish()) {
parse->EventPlayer(EVENT_FISH_START, this, "", 0);
//these will trigger GoFish() after a delay if we're able to actually fish, and if not, we won't stop the client from trying again immediately (although we may need to tell it to repop the button)
p_timers.Start(pTimerFishing, FishingReuseTime - 1);
fishing_timer.Start();
}
return;
// Changes made based on Bobs work on foraging. Now can set items in the forage database table to
// forage for.
}
void Client::Handle_OP_Forage(const EQApplicationPacket *app)
{
if (!p_timers.Expired(&database, pTimerForaging, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerForaging, ForagingReuseTime - 1);
ForageItem();
return;
}
void Client::Handle_OP_FriendsWho(const EQApplicationPacket *app)
{
char *FriendsString = (char*)app->pBuffer;
FriendsWho(FriendsString);
return;
}
void Client::Handle_OP_GetGuildMOTD(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GetGuildMOTD");
SendGuildMOTD(true);
if (IsInAGuild())
{
SendGuildURL();
SendGuildChannel();
}
}
void Client::Handle_OP_GetGuildsList(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GetGuildsList");
SendGuildList();
}
void Client::Handle_OP_GMBecomeNPC(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/becomenpc");
return;
}
if (app->size != sizeof(BecomeNPC_Struct)) {
LogError("Wrong size: OP_GMBecomeNPC, size=[{}], expected [{}]", app->size, sizeof(BecomeNPC_Struct));
return;
}
//entity_list.QueueClients(this, app, false);
BecomeNPC_Struct* bnpc = (BecomeNPC_Struct*)app->pBuffer;
Mob* cli = (Mob*)entity_list.GetMob(bnpc->id);
if (cli == 0)
return;
if (cli->IsClient())
cli->CastToClient()->QueuePacket(app);
cli->SendAppearancePacket(AT_NPCName, 1, true);
cli->CastToClient()->SetBecomeNPC(true);
cli->CastToClient()->SetBecomeNPCLevel(bnpc->maxlevel);
cli->MessageString(Chat::White, TOGGLE_OFF);
cli->CastToClient()->tellsoff = true;
//TODO: Make this toggle a BecomeNPC flag so that it gets updated when people zone in as well; Make combat work with this.
return;
}
void Client::Handle_OP_GMDelCorpse(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMDelCorpse_Struct))
return;
if (this->Admin() < commandEditPlayerCorpses) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/delcorpse");
return;
}
GMDelCorpse_Struct* dc = (GMDelCorpse_Struct *)app->pBuffer;
Mob* corpse = entity_list.GetMob(dc->corpsename);
if (corpse == 0) {
return;
}
	if (!corpse->IsCorpse()) {
return;
}
corpse->CastToCorpse()->Delete();
std::cout << name << " deleted corpse " << dc->corpsename << std::endl;
Message(Chat::Red, "Corpse %s deleted.", dc->corpsename);
return;
}
void Client::Handle_OP_GMEmoteZone(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/emote");
return;
}
if (app->size != sizeof(GMEmoteZone_Struct)) {
LogError("Wrong size: OP_GMEmoteZone, size=[{}], expected [{}]", app->size, sizeof(GMEmoteZone_Struct));
return;
}
GMEmoteZone_Struct* gmez = (GMEmoteZone_Struct*)app->pBuffer;
char* newmessage = nullptr;
if (strstr(gmez->text, "^") == 0)
entity_list.Message(0, 15, gmez->text);
else {
for (newmessage = strtok((char*)gmez->text, "^"); newmessage != nullptr; newmessage = strtok(nullptr, "^"))
entity_list.Message(0, 15, newmessage);
}
return;
}
void Client::Handle_OP_GMEndTraining(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMTrainEnd_Struct)) {
LogDebug("Size mismatch in OP_GMEndTraining expected [{}] got [{}]", sizeof(GMTrainEnd_Struct), app->size);
DumpPacket(app);
return;
}
OPGMEndTraining(app);
return;
}
void Client::Handle_OP_GMFind(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/find");
return;
}
if (app->size != sizeof(GMSummon_Struct)) {
LogError("Wrong size: OP_GMFind, size=[{}], expected [{}]", app->size, sizeof(GMSummon_Struct));
return;
}
//Break down incoming
GMSummon_Struct* request = (GMSummon_Struct*)app->pBuffer;
//Create a new outgoing
auto outapp = new EQApplicationPacket(OP_GMFind, sizeof(GMSummon_Struct));
GMSummon_Struct* foundplayer = (GMSummon_Struct*)outapp->pBuffer;
//Copy the constants
strcpy(foundplayer->charname, request->charname);
strcpy(foundplayer->gmname, request->gmname);
	//Check if the NPC exists intrazone...
Mob* gt = entity_list.GetMob(request->charname);
if (gt != 0) {
foundplayer->success = 1;
foundplayer->x = (int32)gt->GetX();
foundplayer->y = (int32)gt->GetY();
foundplayer->z = (int32)gt->GetZ();
foundplayer->zoneID = zone->GetZoneID();
}
//Send the packet...
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_GMGoto(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMSummon_Struct)) {
std::cout << "Wrong size on OP_GMGoto. Got: " << app->size << ", Expected: " << sizeof(GMSummon_Struct) << std::endl;
return;
}
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/goto");
return;
}
GMSummon_Struct* gmg = (GMSummon_Struct*)app->pBuffer;
Mob* gt = entity_list.GetMob(gmg->charname);
if (gt != nullptr) {
this->MovePC(zone->GetZoneID(), zone->GetInstanceID(), gt->GetX(), gt->GetY(), gt->GetZ(), gt->GetHeading());
}
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected.");
else {
auto pack = new ServerPacket(ServerOP_GMGoto, sizeof(ServerGMGoto_Struct));
memset(pack->pBuffer, 0, pack->size);
ServerGMGoto_Struct* wsgmg = (ServerGMGoto_Struct*)pack->pBuffer;
strcpy(wsgmg->myname, this->GetName());
strcpy(wsgmg->gotoname, gmg->charname);
wsgmg->admin = admin;
worldserver.SendPacket(pack);
safe_delete(pack);
}
return;
}
void Client::Handle_OP_GMHideMe(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/hideme");
return;
}
if (app->size != sizeof(SpawnAppearance_Struct)) {
LogError("Wrong size: OP_GMHideMe, size=[{}], expected [{}]", app->size, sizeof(SpawnAppearance_Struct));
return;
}
SpawnAppearance_Struct* sa = (SpawnAppearance_Struct*)app->pBuffer;
Message(Chat::Red, "#: %i, %i", sa->type, sa->parameter);
SetHideMe(!sa->parameter);
return;
}
void Client::Handle_OP_GMKick(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMKick_Struct))
return;
if (this->Admin() < minStatusToKick) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/kick");
return;
}
GMKick_Struct* gmk = (GMKick_Struct *)app->pBuffer;
Client* client = entity_list.GetClientByName(gmk->name);
if (client == 0) {
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_KickPlayer, sizeof(ServerKickPlayer_Struct));
ServerKickPlayer_Struct* skp = (ServerKickPlayer_Struct*)pack->pBuffer;
strcpy(skp->adminname, gmk->gmname);
strcpy(skp->name, gmk->name);
skp->adminrank = this->Admin();
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
else {
entity_list.QueueClients(this, app);
//client->Kick();
}
return;
}
void Client::Handle_OP_GMKill(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/kill");
return;
}
if (app->size != sizeof(GMKill_Struct)) {
LogError("Wrong size: OP_GMKill, size=[{}], expected [{}]", app->size, sizeof(GMKill_Struct));
return;
}
GMKill_Struct* gmk = (GMKill_Struct *)app->pBuffer;
Mob* obj = entity_list.GetMob(gmk->name);
Client* client = entity_list.GetClientByName(gmk->name);
if (obj != 0) {
if (client != 0) {
entity_list.QueueClients(this, app);
}
else {
obj->Kill();
}
}
else {
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_KillPlayer, sizeof(ServerKillPlayer_Struct));
ServerKillPlayer_Struct* skp = (ServerKillPlayer_Struct*)pack->pBuffer;
strcpy(skp->gmname, gmk->gmname);
strcpy(skp->target, gmk->name);
skp->admin = this->Admin();
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
return;
}
void Client::Handle_OP_GMLastName(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMLastName_Struct)) {
std::cout << "Wrong size on OP_GMLastName. Got: " << app->size << ", Expected: " << sizeof(GMLastName_Struct) << std::endl;
return;
}
GMLastName_Struct* gmln = (GMLastName_Struct*)app->pBuffer;
if (strlen(gmln->lastname) >= 64) {
Message(Chat::Red, "/LastName: New last name too long. (max=63)");
}
else {
Client* client = entity_list.GetClientByName(gmln->name);
if (client == 0) {
Message(Chat::Red, "/LastName: %s not found", gmln->name);
}
else {
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(client->account_name, client->name, "/lastname");
return;
}
else
client->ChangeLastName(gmln->lastname);
}
gmln->unknown[0] = 1;
gmln->unknown[1] = 1;
gmln->unknown[2] = 1;
gmln->unknown[3] = 1;
entity_list.QueueClients(this, app, false);
}
return;
}
void Client::Handle_OP_GMNameChange(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMName_Struct)) {
LogError("Wrong size: OP_GMNameChange, size=[{}], expected [{}]", app->size, sizeof(GMName_Struct));
return;
}
const GMName_Struct* gmn = (const GMName_Struct *)app->pBuffer;
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/name");
return;
}
Client* client = entity_list.GetClientByName(gmn->oldname);
LogInfo("GM([{}]) changeing players name. Old:[{}] New:[{}]", GetName(), gmn->oldname, gmn->newname);
bool name_available = database.CheckUsedName((const char*)gmn->newname);
if (client == 0) {
Message(Chat::Red, "%s not found for name change. Operation failed!", gmn->oldname);
return;
}
if ((strlen(gmn->newname) > 63) || (strlen(gmn->newname) == 0)) {
Message(Chat::Red, "Invalid number of characters in new name (%s).", gmn->newname);
return;
}
if (!name_available) {
Message(Chat::Red, "%s is already in use. Operation failed!", gmn->newname);
return;
}
database.UpdateName(gmn->oldname, gmn->newname);
strcpy(client->name, gmn->newname);
client->Save();
if (gmn->badname == 1) {
database.AddToNameFilter(gmn->oldname);
}
EQApplicationPacket* outapp = app->Copy();
GMName_Struct* gmn2 = (GMName_Struct*)outapp->pBuffer;
gmn2->unknown[0] = 1;
gmn2->unknown[1] = 1;
gmn2->unknown[2] = 1;
entity_list.QueueClients(this, outapp, false);
safe_delete(outapp);
UpdateWho();
return;
}
void Client::Handle_OP_GMSearchCorpse(const EQApplicationPacket *app)
{
// Could make this into a rule, although there is a hard limit of 4096 bytes that can
// be displayed in the popup window, including all the HTML formatting tags.
//
const int maxResults = 10;
if (app->size < sizeof(GMSearchCorpse_Struct))
{
LogDebug("OP_GMSearchCorpse size lower than expected: got [{}] expected at least [{}]", app->size, sizeof(GMSearchCorpse_Struct));
DumpPacket(app);
return;
}
GMSearchCorpse_Struct *gmscs = (GMSearchCorpse_Struct *)app->pBuffer;
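// Force null termination on the client-supplied name, then escape it before it is used in the SQL query below.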
gmscs->Name[63] = '\0';
auto escSearchString = new char[129];
database.DoEscapeString(escSearchString, gmscs->Name, strlen(gmscs->Name));
std::string query = StringFormat("SELECT charname, zone_id, x, y, z, time_of_death, is_rezzed, is_buried "
"FROM character_corpses WheRE charname LIKE '%%%s%%' ORDER BY charname LIMIT %i",
escSearchString, maxResults);
safe_delete_array(escSearchString);
auto results = database.QueryDatabase(query);
if (!results.Success()) {
return;
}
if (results.RowCount() == 0)
return;
if (results.RowCount() == maxResults)
Message(Chat::Red, "Your search found too many results; some are not displayed.");
else
Message(Chat::Yellow, "There are %i corpse(s) that match the search string '%s'.", results.RowCount(), gmscs->Name);
char charName[64], time_of_death[20];
std::string popupText = "<table><tr><td>Name</td><td>Zone</td><td>X</td><td>Y</td><td>Z</td><td>Date</td><td>"
"Rezzed</td><td>Buried</td></tr><tr><td> </td><td></td><td></td><td></td><td></td><td>"
"</td><td></td><td></td></tr>";
for (auto row = results.begin(); row != results.end(); ++row) {
strn0cpy(charName, row[0], sizeof(charName));
uint32 ZoneID = atoi(row[1]);
float CorpseX = atof(row[2]);
float CorpseY = atof(row[3]);
float CorpseZ = atof(row[4]);
strn0cpy(time_of_death, row[5], sizeof(time_of_death));
bool corpseRezzed = atoi(row[6]);
bool corpseBuried = atoi(row[7]);
popupText += StringFormat("<tr><td>%s</td><td>%s</td><td>%8.0f</td><td>%8.0f</td><td>%8.0f</td><td>%s</td><td>%s</td><td>%s</td></tr>",
charName, StaticGetZoneName(ZoneID), CorpseX, CorpseY, CorpseZ, time_of_death,
corpseRezzed ? "Yes" : "No", corpseBuried ? "Yes" : "No");
if (popupText.size() > 4000) {
Message(Chat::Red, "Unable to display all the results.");
break;
}
}
popupText += "</table>";
SendPopupToClient("Corpses", popupText.c_str());
}
void Client::Handle_OP_GMServers(const EQApplicationPacket *app)
{
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_ZoneStatus, strlen(this->GetName()) + 2);
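// Payload: one byte of admin status followed by the null-terminated character name.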
memset(pack->pBuffer, (uint8)admin, 1);
strcpy((char *)&pack->pBuffer[1], this->GetName());
worldserver.SendPacket(pack);
safe_delete(pack);
}
return;
}
void Client::Handle_OP_GMSummon(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMSummon_Struct)) {
std::cout << "Wrong size on OP_GMSummon. Got: " << app->size << ", Expected: " << sizeof(GMSummon_Struct) << std::endl;
return;
}
OPGMSummon(app);
return;
}
void Client::Handle_OP_GMToggle(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMToggle_Struct)) {
std::cout << "Wrong size on OP_GMToggle. Got: " << app->size << ", Expected: " << sizeof(GMToggle_Struct) << std::endl;
return;
}
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/toggle");
return;
}
GMToggle_Struct *ts = (GMToggle_Struct *)app->pBuffer;
if (ts->toggle == 0) {
this->MessageString(Chat::White, TOGGLE_OFF);
//Message(0, "Turning tells OFF");
tellsoff = true;
}
else if (ts->toggle == 1) {
//Message(0, "Turning tells ON");
this->MessageString(Chat::White, TOGGLE_ON);
tellsoff = false;
}
else {
Message(0, "Unkown value in /toggle packet");
}
UpdateWho();
return;
}
void Client::Handle_OP_GMTraining(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMTrainee_Struct)) {
LogDebug("Size mismatch in OP_GMTraining expected [{}] got [{}]", sizeof(GMTrainee_Struct), app->size);
DumpPacket(app);
return;
}
OPGMTraining(app);
return;
}
void Client::Handle_OP_GMTrainSkill(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMSkillChange_Struct)) {
LogDebug("Size mismatch in OP_GMTrainSkill expected [{}] got [{}]", sizeof(GMSkillChange_Struct), app->size);
DumpPacket(app);
return;
}
OPGMTrainSkill(app);
return;
}
void Client::Handle_OP_GMZoneRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMZoneRequest_Struct)) {
std::cout << "Wrong size on OP_GMZoneRequest. Got: " << app->size << ", Expected: " << sizeof(GMZoneRequest_Struct) << std::endl;
return;
}
if (this->Admin() < minStatusToBeGM) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/zone");
return;
}
GMZoneRequest_Struct* gmzr = (GMZoneRequest_Struct*)app->pBuffer;
float target_x = -1, target_y = -1, target_z = -1, target_heading = 0; // initialize heading so a failed safe-point lookup cannot send an uninitialized value
int16 min_status = 0;
uint8 min_level = 0;
char target_zone[32];
uint16 zone_id = gmzr->zone_id;
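// A requested zone_id of 0 means the client wants the previously stored summon zone (zonesummon_id).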
if (gmzr->zone_id == 0)
zone_id = zonesummon_id;
const char* zone_short_name = ZoneName(zone_id);
if (zone_short_name == nullptr)
target_zone[0] = 0;
else
strcpy(target_zone, zone_short_name);
// this both loads the safe points and does a sanity check on zone name
if (!content_db.GetSafePoints(
target_zone,
0,
&target_x,
&target_y,
&target_z,
&target_heading,
&min_status,
&min_level
)) {
target_zone[0] = 0;
}
auto outapp = new EQApplicationPacket(OP_GMZoneRequest, sizeof(GMZoneRequest_Struct));
GMZoneRequest_Struct* gmzr2 = (GMZoneRequest_Struct*)outapp->pBuffer;
strcpy(gmzr2->charname, this->GetName());
gmzr2->zone_id = gmzr->zone_id;
gmzr2->x = target_x;
gmzr2->y = target_y;
gmzr2->z = target_z;
gmzr2->heading = target_heading;
// Next line stolen from ZoneChange as well... - This gives us a nicer message than the normal "zone is down" message...
if (target_zone[0] != 0 && admin >= min_status && GetLevel() >= min_level)
gmzr2->success = 1;
else {
std::cout << "GetZoneSafeCoords failed. zoneid = " << gmzr->zone_id << "; czone = " << zone->GetZoneID() << std::endl;
gmzr2->success = 0;
}
QueuePacket(outapp);
safe_delete(outapp);
return;
}
void Client::Handle_OP_GMZoneRequest2(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToBeGM) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/zone");
return;
}
if (app->size < sizeof(uint32)) {
LogError("OP size error: OP_GMZoneRequest2 expected:[{}] got:[{}]", sizeof(uint32), app->size);
return;
}
uint32 zonereq = *((uint32 *)app->pBuffer);
GoToSafeCoords(zonereq, 0);
return;
}
void Client::Handle_OP_GroupAcknowledge(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_GroupCancelInvite(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupCancel_Struct)) {
LogError("Invalid size for OP_GroupCancelInvite: Expected: [{}], Got: [{}]",
sizeof(GroupCancel_Struct), app->size);
return;
}
GroupCancel_Struct* gf = (GroupCancel_Struct*)app->pBuffer;
Mob* inviter = entity_list.GetClientByName(gf->name1);
if (inviter != nullptr)
{
if (inviter->IsClient())
inviter->CastToClient()->QueuePacket(app);
}
else
{
auto pack = new ServerPacket(ServerOP_GroupCancelInvite, sizeof(GroupCancel_Struct));
memcpy(pack->pBuffer, gf, sizeof(GroupCancel_Struct));
worldserver.SendPacket(pack);
safe_delete(pack);
}
if (!GetMerc())
{
database.SetGroupID(GetName(), 0, CharacterID(), false);
}
return;
}
void Client::Handle_OP_GroupDelete(const EQApplicationPacket *app)
{
//should check for leader, only they should be able to do this..
Group* group = GetGroup();
if (group)
group->DisbandGroup();
if (LFP)
UpdateLFP();
return;
}
void Client::Handle_OP_GroupDisband(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupGeneric_Struct)) {
LogError("Invalid size for GroupGeneric_Struct: Expected: [{}], Got: [{}]",
sizeof(GroupGeneric_Struct), app->size);
return;
}
LogDebug("Member Disband Request from [{}]\n", GetName());
GroupGeneric_Struct* gd = (GroupGeneric_Struct*)app->pBuffer;
Raid *raid = entity_list.GetRaidByClient(this);
if (raid)
{
Mob* memberToDisband = nullptr;
if (!raid->IsGroupLeader(GetName()))
memberToDisband = this;
else
memberToDisband = GetTarget();
if (!memberToDisband)
memberToDisband = entity_list.GetMob(gd->name2);
if (!memberToDisband)
memberToDisband = this;
if (!memberToDisband->IsClient())
return;
//we have a raid.. see if we're in a raid group
uint32 grp = raid->GetGroup(memberToDisband->GetName());
bool wasGrpLdr = raid->members[raid->GetPlayerIndex(memberToDisband->GetName())].IsGroupLeader;
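// Raids contain at most 12 groups (indices 0-11); any larger value means the member is ungrouped.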
if (grp < 12) {
if (wasGrpLdr) {
raid->SetGroupLeader(memberToDisband->GetName(), false);
for (int x = 0; x < MAX_RAID_MEMBERS; x++)
{
if (raid->members[x].GroupNumber == grp)
{
if (strlen(raid->members[x].membername) > 0 && strcmp(raid->members[x].membername, memberToDisband->GetName()) != 0)
{
raid->SetGroupLeader(raid->members[x].membername);
break;
}
}
}
}
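// Moving a member to group 0xFFFFFFFF removes them from any raid group.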
raid->MoveMember(memberToDisband->GetName(), 0xFFFFFFFF);
raid->GroupUpdate(grp); //break
//raid->SendRaidGroupRemove(memberToDisband->GetName(), grp);
//raid->SendGroupUpdate(memberToDisband->CastToClient());
raid->SendGroupDisband(memberToDisband->CastToClient());
}
//we're done
return;
}
Group* group = GetGroup();
if (!group)
return;
#ifdef BOTS
// this block is necessary to allow more control over controlling how bots are zoned or camped.
if (Bot::GroupHasBot(group)) {
if (group->IsLeader(this)) {
if ((GetTarget() == 0 || GetTarget() == this) || (group->GroupCount() < 3)) {
Bot::ProcessBotGroupDisband(this, std::string());
}
else {
Mob* tempMember = entity_list.GetMob(gd->name1); //Name1 is the target you are disbanding
if (tempMember && tempMember->IsBot()) {
tempMember->CastToBot()->RemoveBotFromGroup(tempMember->CastToBot(), group);
if (LFP)
{
// If we are looking for players, update to show we are on our own now.
UpdateLFP();
}
return; //No need to continue from here we were removing a bot from party
}
}
}
}
group = GetGroup();
if (!group) //We must recheck this here, in case the final bot disbanded the party; otherwise we crash
return;
#endif
Mob* memberToDisband = GetTarget();
if (!memberToDisband)
memberToDisband = entity_list.GetMob(gd->name2);
if (memberToDisband) {
auto group2 = memberToDisband->GetGroup();
if (group2 != group) // they're not in our group!
memberToDisband = this;
}
if (group->GroupCount() < 3)
{
group->DisbandGroup();
if (GetMerc())
GetMerc()->Suspend();
}
else if (group->IsLeader(this) && GetTarget() == nullptr)
{
if (group->GroupCount() > 2 && GetMerc() && !GetMerc()->IsSuspended())
{
group->DisbandGroup();
GetMerc()->MercJoinClientGroup();
}
else
{
group->DisbandGroup();
if (GetMerc())
GetMerc()->Suspend();
}
}
else if (group->IsLeader(this) && (GetTarget() == this || memberToDisband == this))
{
LeaveGroup();
if (GetMerc() && !GetMerc()->IsSuspended())
{
GetMerc()->MercJoinClientGroup();
}
}
else
{
if (memberToDisband)
{
if (group->IsLeader(this))
{
// the group leader can kick other members out of the group...
if (memberToDisband->IsClient())
{
group->DelMember(memberToDisband, false);
Client* memberClient = memberToDisband->CastToClient();
Merc* memberMerc = memberToDisband->CastToClient()->GetMerc();
if (memberClient && memberMerc)
{
memberMerc->MercJoinClientGroup();
}
}
else if (memberToDisband->IsMerc())
{
memberToDisband->CastToMerc()->Suspend();
}
}
else
{
// ...but other members can only remove themselves
group->DelMember(this, false);
if (GetMerc() && !GetMerc()->IsSuspended())
{
GetMerc()->MercJoinClientGroup();
}
}
}
else
{
LogError("Failed to remove player from group. Unable to find player named [{}] in player group", gd->name2);
}
}
if (LFP)
{
// If we are looking for players, update to show we are on our own now.
UpdateLFP();
}
return;
}
void Client::Handle_OP_GroupFollow(const EQApplicationPacket *app)
{
Handle_OP_GroupFollow2(app);
}
void Client::Handle_OP_GroupFollow2(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupGeneric_Struct)) {
LogError("Invalid size for OP_GroupFollow: Expected: [{}], Got: [{}]",
sizeof(GroupGeneric_Struct), app->size);
return;
}
if (LFP) {
// If we were looking for players to start our own group, but we accept an invitation to another
// group, turn LFP off.
database.SetLFP(CharacterID(), false);
worldserver.StopLFP(CharacterID());
}
GroupGeneric_Struct* gf = (GroupGeneric_Struct*)app->pBuffer;
Mob* inviter = entity_list.GetClientByName(gf->name1);
// Inviter and Invitee are in the same zone
if (inviter != nullptr && inviter->IsClient())
{
if (GroupFollow(inviter->CastToClient()))
{
strn0cpy(gf->name1, inviter->GetName(), sizeof(gf->name1));
strn0cpy(gf->name2, GetName(), sizeof(gf->name2));
inviter->CastToClient()->QueuePacket(app);//notify inviter the client accepted
}
}
else if (inviter == nullptr)
{
// Inviter is in another zone - Remove merc from group now if any
LeaveGroup();
auto pack = new ServerPacket(ServerOP_GroupFollow, sizeof(ServerGroupFollow_Struct));
ServerGroupFollow_Struct *sgfs = (ServerGroupFollow_Struct *)pack->pBuffer;
sgfs->CharacterID = CharacterID();
strn0cpy(sgfs->gf.name1, gf->name1, sizeof(sgfs->gf.name1));
strn0cpy(sgfs->gf.name2, gf->name2, sizeof(sgfs->gf.name2));
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
void Client::Handle_OP_GroupInvite(const EQApplicationPacket *app)
{
//this seems to be the initial invite to form a group
Handle_OP_GroupInvite2(app);
}
void Client::Handle_OP_GroupInvite2(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupInvite_Struct)) {
LogError("Invalid size for OP_GroupInvite: Expected: [{}], Got: [{}]",
sizeof(GroupInvite_Struct), app->size);
return;
}
GroupInvite_Struct* gis = (GroupInvite_Struct*)app->pBuffer;
Mob *Invitee = entity_list.GetMob(gis->invitee_name);
if (Invitee == this)
{
MessageString(Chat::LightGray, GROUP_INVITEE_SELF);
return;
}
if (Invitee)
{
if (Invitee->IsClient())
{
if (Invitee->CastToClient()->MercOnlyOrNoGroup() && !Invitee->IsRaidGrouped())
{
if (app->GetOpcode() == OP_GroupInvite2)
{
//Make a new packet using all the same information but make sure it's a fixed GroupInvite opcode so we
//don't have to deal with GroupFollow2 crap.
auto outapp =
new EQApplicationPacket(OP_GroupInvite, sizeof(GroupInvite_Struct));
memcpy(outapp->pBuffer, app->pBuffer, outapp->size);
Invitee->CastToClient()->QueuePacket(outapp);
safe_delete(outapp);
return;
}
else
{
//The correct opcode, no reason to bother wasting time reconstructing the packet
Invitee->CastToClient()->QueuePacket(app);
}
}
}
#ifdef BOTS
else if (Invitee->IsBot()) {
Bot::ProcessBotGroupInvite(this, std::string(Invitee->GetName()));
}
#endif
}
else
{
auto pack = new ServerPacket(ServerOP_GroupInvite, sizeof(GroupInvite_Struct));
memcpy(pack->pBuffer, gis, sizeof(GroupInvite_Struct));
worldserver.SendPacket(pack);
safe_delete(pack);
}
return;
}
void Client::Handle_OP_GroupMakeLeader(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_GroupMakeLeader, app, GroupMakeLeader_Struct);
GroupMakeLeader_Struct *gmls = (GroupMakeLeader_Struct *)app->pBuffer;
Mob* NewLeader = entity_list.GetClientByName(gmls->NewLeader);
Group* g = GetGroup();
if (NewLeader && g)
{
if (g->IsLeader(this))
g->ChangeLeader(NewLeader);
else {
LogDebug("Group /makeleader request originated from non-leader member: [{}]", GetName());
DumpPacket(app);
}
}
}
void Client::Handle_OP_GroupMentor(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupMentor_Struct)) {
LogError("Wrong size: OP_GroupMentor, size=[{}], expected [{}]", app->size, sizeof(GroupMentor_Struct));
DumpPacket(app);
return;
}
GroupMentor_Struct *gms = (GroupMentor_Struct *)app->pBuffer;
gms->name[63] = '\0';
if (IsRaidGrouped()) {
Raid *raid = GetRaid();
if (!raid)
return;
uint32 group_id = raid->GetGroup(this);
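// Valid raid group indices are 0-11; anything higher means this member has no group.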
if (group_id > 11)
return;
if (strlen(gms->name))
raid->SetGroupMentor(group_id, gms->percent, gms->name);
else
raid->ClearGroupMentor(group_id);
return;
}
Group *group = GetGroup();
if (!group)
return;
if (strlen(gms->name))
group->SetGroupMentor(gms->percent, gms->name);
else
group->ClearGroupMentor();
return;
}
void Client::Handle_OP_GroupRoles(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupRole_Struct)) {
LogError("Wrong size: OP_GroupRoles, size=[{}], expected [{}]", app->size, sizeof(GroupRole_Struct));
DumpPacket(app);
return;
}
GroupRole_Struct *grs = (GroupRole_Struct*)app->pBuffer;
Group *g = GetGroup();
if (!g)
return;
switch (grs->RoleNumber)
{
case 1: //Main Tank
{
if (grs->Toggle)
g->DelegateMainTank(grs->Name1, grs->Toggle);
else
g->UnDelegateMainTank(grs->Name1, grs->Toggle);
break;
}
case 2: //Main Assist
{
if (grs->Toggle)
g->DelegateMainAssist(grs->Name1, grs->Toggle);
else
g->UnDelegateMainAssist(grs->Name1, grs->Toggle);
break;
}
case 3: //Puller
{
if (grs->Toggle)
g->DelegatePuller(grs->Name1, grs->Toggle);
else
g->UnDelegatePuller(grs->Name1, grs->Toggle);
break;
}
default:
break;
}
}
void Client::Handle_OP_GroupUpdate(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupUpdate_Struct))
{
LogDebug("Size mismatch on OP_GroupUpdate: got [{}] expected [{}]", app->size, sizeof(GroupUpdate_Struct));
DumpPacket(app);
return;
}
GroupUpdate_Struct* gu = (GroupUpdate_Struct*)app->pBuffer;
switch (gu->action) {
case groupActMakeLeader:
{
Mob* newleader = entity_list.GetClientByName(gu->membername[0]);
Group* group = this->GetGroup();
if (newleader && group) {
// the client only sends this if it's the group leader, but check anyway
if (group->IsLeader(this))
group->ChangeLeader(newleader);
else {
LogDebug("Group /makeleader request originated from non-leader member: [{}]", GetName());
DumpPacket(app);
}
}
break;
}
default:
{
LogDebug("Received unhandled OP_GroupUpdate requesting action [{}]", gu->action);
DumpPacket(app);
return;
}
}
}
void Client::Handle_OP_GuildBank(const EQApplicationPacket *app)
{
if (!GuildBanks)
return;
if ((int)zone->GetZoneID() != RuleI(World, GuildBankZoneID))
{
Message(Chat::Red, "The Guild Bank is not available in this zone.");
return;
}
if (app->size < sizeof(uint32)) {
LogError("Wrong size: OP_GuildBank, size=[{}], expected [{}]", app->size, sizeof(uint32));
DumpPacket(app);
return;
}
char *Buffer = (char *)app->pBuffer;
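// Every guild bank packet begins with a uint32 action code; the rest of the payload depends on the action.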
uint32 Action = VARSTRUCT_DECODE_TYPE(uint32, Buffer);
uint32 sentAction = Action;
if (!IsInAGuild())
{
Message(Chat::Red, "You must be in a Guild to use the Guild Bank.");
if (Action == GuildBankDeposit)
GuildBankDepositAck(true, sentAction);
else
GuildBankAck();
return;
}
if (!IsGuildBanker())
{
if ((Action != GuildBankDeposit) && (Action != GuildBankViewItem) && (Action != GuildBankWithdraw))
{
LogError("Suspected hacking attempt on guild bank from [{}]", GetName());
GuildBankAck();
return;
}
}
switch (Action)
{
case GuildBankPromote:
{
if (GuildBanks->IsAreaFull(GuildID(), GuildBankMainArea))
{
MessageString(Chat::Red, GUILD_BANK_FULL);
GuildBankDepositAck(true, sentAction);
return;
}
GuildBankPromote_Struct *gbps = (GuildBankPromote_Struct*)app->pBuffer;
int Slot = GuildBanks->Promote(GuildID(), gbps->Slot);
if (Slot >= 0)
{
EQ::ItemInstance* inst = GuildBanks->GetItem(GuildID(), GuildBankMainArea, Slot, 1);
if (inst)
{
MessageString(Chat::LightGray, GUILD_BANK_TRANSFERRED, inst->GetItem()->Name);
safe_delete(inst);
}
}
else
Message(Chat::Red, "Unexpected error while moving item into Guild Bank.");
GuildBankAck();
break;
}
case GuildBankViewItem:
{
GuildBankViewItem_Struct *gbvis = (GuildBankViewItem_Struct*)app->pBuffer;
EQ::ItemInstance* inst = GuildBanks->GetItem(GuildID(), gbvis->Area, gbvis->SlotID, 1);
if (!inst)
break;
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
break;
}
case GuildBankDeposit: // Deposit Item
{
if (GuildBanks->IsAreaFull(GuildID(), GuildBankDepositArea))
{
MessageString(Chat::Red, GUILD_BANK_FULL);
GuildBankDepositAck(true, sentAction);
return;
}
EQ::ItemInstance *CursorItemInst = GetInv().GetItem(EQ::invslot::slotCursor);
bool Allowed = true;
if (!CursorItemInst)
{
Message(Chat::Red, "No Item on the cursor.");
GuildBankDepositAck(true, sentAction);
return;
}
const EQ::ItemData* CursorItem = CursorItemInst->GetItem();
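// Deposit rules: the item must be droppable, not attuned, not a non-empty container, not augmented,
// must survive camping (NoRent != 0), and lore items may not duplicate one already in the bank.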
if (!CursorItem->NoDrop || CursorItemInst->IsAttuned())
{
Allowed = false;
}
else if (CursorItemInst->IsNoneEmptyContainer())
{
Allowed = false;
}
else if (CursorItemInst->IsAugmented())
{
Allowed = false;
}
else if (CursorItem->NoRent == 0)
{
Allowed = false;
}
else if (CursorItem->LoreFlag && GuildBanks->HasItem(GuildID(), CursorItem->ID))
{
Allowed = false;
}
if (!Allowed)
{
MessageString(Chat::Red, GUILD_BANK_CANNOT_DEPOSIT);
GuildBankDepositAck(true, sentAction);
return;
}
if (GuildBanks->AddItem(GuildID(), GuildBankDepositArea, CursorItem->ID, CursorItemInst->GetCharges(), GetName(), GuildBankBankerOnly, ""))
{
GuildBankDepositAck(false, sentAction);
DeleteItemInInventory(EQ::invslot::slotCursor, 0, false);
}
break;
}
case GuildBankPermissions:
{
GuildBankPermissions_Struct *gbps = (GuildBankPermissions_Struct*)app->pBuffer;
if (gbps->Permissions == 1)
GuildBanks->SetPermissions(GuildID(), gbps->SlotID, gbps->Permissions, gbps->MemberName);
else
GuildBanks->SetPermissions(GuildID(), gbps->SlotID, gbps->Permissions, "");
GuildBankAck();
break;
}
case GuildBankWithdraw:
{
if (GetInv()[EQ::invslot::slotCursor])
{
MessageString(Chat::Red, GUILD_BANK_EMPTY_HANDS);
GuildBankAck();
break;
}
GuildBankWithdrawItem_Struct *gbwis = (GuildBankWithdrawItem_Struct*)app->pBuffer;
EQ::ItemInstance* inst = GuildBanks->GetItem(GuildID(), gbwis->Area, gbwis->SlotID, gbwis->Quantity);
if (!inst)
{
GuildBankAck();
break;
}
if (!IsGuildBanker() && !GuildBanks->AllowedToWithdraw(GuildID(), gbwis->Area, gbwis->SlotID, GetName()))
{
LogError("Suspected attempted hack on the guild bank from [{}]", GetName());
GuildBankAck();
safe_delete(inst);
break;
}
if (CheckLoreConflict(inst->GetItem()))
{
MessageString(Chat::Red, DUP_LORE);
GuildBankAck();
safe_delete(inst);
break;
}
if (gbwis->Quantity > 0)
{
PushItemOnCursor(*inst);
SendItemPacket(EQ::invslot::slotCursor, inst, ItemPacketLimbo);
GuildBanks->DeleteItem(GuildID(), gbwis->Area, gbwis->SlotID, gbwis->Quantity);
}
else
{
Message(0, "Unable to withdraw 0 quantity of %s", inst->GetItem()->Name);
}
safe_delete(inst);
GuildBankAck();
break;
}
case GuildBankSplitStacks:
{
if (GuildBanks->IsAreaFull(GuildID(), GuildBankMainArea))
MessageString(Chat::Red, GUILD_BANK_FULL);
else
{
GuildBankWithdrawItem_Struct *gbwis = (GuildBankWithdrawItem_Struct*)app->pBuffer;
GuildBanks->SplitStack(GuildID(), gbwis->SlotID, gbwis->Quantity);
}
GuildBankAck();
break;
}
case GuildBankMergeStacks:
{
GuildBankWithdrawItem_Struct *gbwis = (GuildBankWithdrawItem_Struct*)app->pBuffer;
GuildBanks->MergeStacks(GuildID(), gbwis->SlotID);
GuildBankAck();
break;
}
default:
{
Message(Chat::Red, "Unexpected GuildBank action.");
LogError("Received unexpected guild bank action code [{}] from [{}]", Action, GetName());
}
}
}
void Client::Handle_OP_GuildCreate(const EQApplicationPacket *app)
{
if (IsInAGuild())
{
Message(Chat::Red, "You are already in a guild!");
return;
}
if (!RuleB(Guild, PlayerCreationAllowed))
{
Message(Chat::Red, "This feature is disabled on this server. Contact a GM or post on your server message boards to create a guild.");
return;
}
if ((Admin() < RuleI(Guild, PlayerCreationRequiredStatus)) ||
(GetLevel() < RuleI(Guild, PlayerCreationRequiredLevel)) ||
(database.GetTotalTimeEntitledOnAccount(AccountID()) < (unsigned int)RuleI(Guild, PlayerCreationRequiredTime)))
{
Message(Chat::Red, "Your status, level or time playing on this account are insufficient to use this feature.");
return;
}
// The Underfoot client Guild Creation window will only allow a guild name of <= around 30 characters, but the packet is 64 bytes. Sanity check the
// name anyway.
//
char *GuildName = (char *)app->pBuffer;
#ifdef DARWIN
#if __DARWIN_C_LEVEL < 200809L
if (strlen(GuildName) > 60)
#else
if (strnlen(GuildName, 64) > 60)
#endif // __DARWIN_C_LEVEL
#else
if (strnlen(GuildName, 64) > 60)
#endif // DARWIN
{
Message(Chat::Red, "Guild name too long.");
return;
}
for (unsigned int i = 0; i < strlen(GuildName); ++i)
{
if (!isalpha(GuildName[i]) && (GuildName[i] != ' '))
{
Message(Chat::Red, "Invalid character in Guild name.");
return;
}
}
int32 GuildCount = guild_mgr.DoesAccountContainAGuildLeader(AccountID());
if (GuildCount >= RuleI(Guild, PlayerCreationLimit))
{
Message(Chat::Red, "You cannot create this guild because this account may only be leader of %i guilds.", RuleI(Guild, PlayerCreationLimit));
return;
}
if (guild_mgr.GetGuildIDByName(GuildName) != GUILD_NONE)
{
MessageString(Chat::Red, GUILD_NAME_IN_USE);
return;
}
uint32 NewGuildID = guild_mgr.CreateGuild(GuildName, CharacterID());
LogGuilds("[{}]: Creating guild [{}] with leader [{}] via UF+ GUI. It was given id [{}]", GetName(),
GuildName, CharacterID(), (unsigned long)NewGuildID);
if (NewGuildID == GUILD_NONE)
Message(Chat::Red, "Guild creation failed.");
else
{
if (!guild_mgr.SetGuild(CharacterID(), NewGuildID, GUILD_LEADER))
Message(Chat::Red, "Unable to set guild leader's guild in the database. Contact a GM.");
else
{
Message(Chat::Yellow, "You are now the leader of %s", GuildName);
if (zone->GetZoneID() == RuleI(World, GuildBankZoneID) && GuildBanks)
GuildBanks->SendGuildBank(this);
SendGuildRanks();
}
}
}
void Client::Handle_OP_GuildDelete(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildDelete");
if (!IsInAGuild() || !guild_mgr.IsGuildLeader(GuildID(), CharacterID()))
Message(0, "You are not a guild leader or not in a guild.");
else {
LogGuilds("Deleting guild [{}] ([{}])", guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.DeleteGuild(GuildID()))
Message(0, "Guild delete failed.");
else {
Message(0, "Guild successfully deleted.");
}
}
}
void Client::Handle_OP_GuildDemote(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildDemote");
if (app->size != sizeof(GuildDemoteStruct)) {
LogGuilds("Error: app size of [{}] != size of GuildDemoteStruct of [{}]\n", app->size, sizeof(GuildDemoteStruct));
return;
}
if (!IsInAGuild())
Message(0, "Error: You arent in a guild!");
else if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_DEMOTE))
Message(0, "You dont have permission to invite.");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
GuildDemoteStruct* demote = (GuildDemoteStruct*)app->pBuffer;
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(demote->target, gci)) {
Message(0, "Unable to find '%s'", demote->target);
return;
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
if (gci.rank < 1) {
Message(0, "%s cannot be demoted any further!", demote->target);
return;
}
uint8 rank = gci.rank - 1;
LogGuilds("Demoting [{}] ([{}]) from rank [{}] ([{}]) to [{}] ([{}]) in [{}] ([{}])",
demote->target, gci.char_id,
guild_mgr.GetRankName(GuildID(), gci.rank), gci.rank,
guild_mgr.GetRankName(GuildID(), rank), rank,
guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.SetGuildRank(gci.char_id, rank)) {
Message(Chat::Red, "Error while setting rank %d on '%s'.", rank, demote->target);
return;
}
Message(0, "Successfully demoted %s to rank %d", demote->target, rank);
}
// SendGuildMembers(GuildID(), true);
return;
}
void Client::Handle_OP_GuildInvite(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildInvite");
if (app->size != sizeof(GuildCommand_Struct)) {
std::cout << "Wrong size: OP_GuildInvite, size=" << app->size << ", expected " << sizeof(GuildCommand_Struct) << std::endl;
return;
}
GuildCommand_Struct* gc = (GuildCommand_Struct*)app->pBuffer;
if (!IsInAGuild())
Message(0, "Error: You are not in a guild!");
else if (gc->officer > GUILD_MAX_RANK)
Message(Chat::Red, "Invalid rank.");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
//ok, the invite is also used for changing rank as well.
Mob* invitee = entity_list.GetMob(gc->othername);
if (!invitee) {
Message(Chat::Red, "Prospective guild member %s must be in zone to preform guild operations on them.", gc->othername);
return;
}
if (invitee->IsClient()) {
Client* client = invitee->CastToClient();
//ok, figure out what they are trying to do.
if (client->GuildID() == GuildID()) {
//they are already in this guild, must be a promotion or demotion
if (gc->officer < client->GuildRank()) {
//demotion
if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_DEMOTE)) {
Message(Chat::Red, "You dont have permission to demote.");
return;
}
//we could send this to the member and prompt them to see if they want to
//be demoted (I guess), but I don't see a point in that.
LogGuilds("[{}] ([{}]) is demoting [{}] ([{}]) to rank [{}] in guild [{}] ([{}])",
GetName(), CharacterID(),
client->GetName(), client->CharacterID(),
gc->officer,
guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.SetGuildRank(client->CharacterID(), gc->officer)) {
Message(Chat::Red, "There was an error during the demotion, DB may now be inconsistent.");
return;
}
}
else if (gc->officer > client->GuildRank()) {
//promotion
if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_PROMOTE)) {
Message(Chat::Red, "You dont have permission to demote.");
return;
}
LogGuilds("[{}] ([{}]) is asking to promote [{}] ([{}]) to rank [{}] in guild [{}] ([{}])",
GetName(), CharacterID(),
client->GetName(), client->CharacterID(),
gc->officer,
guild_mgr.GetGuildName(GuildID()), GuildID());
//record the promotion with guild manager so we know it's valid when we get the reply
guild_mgr.RecordInvite(client->CharacterID(), GuildID(), gc->officer);
if (gc->guildeqid == 0)
gc->guildeqid = GuildID();
LogGuilds("Sending OP_GuildInvite for promotion to [{}], length [{}]", client->GetName(), app->size);
client->QueuePacket(app);
}
else {
Message(Chat::Red, "That member is already that rank.");
return;
}
}
else if (!client->IsInAGuild()) {
//they are not in this or any other guild, this is an invite
//
if (client->GetPendingGuildInvitation())
{
Message(Chat::Red, "That person is already considering a guild invitation.");
return;
}
if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_INVITE)) {
Message(Chat::Red, "You dont have permission to invite.");
return;
}
LogGuilds("Inviting [{}] ([{}]) into guild [{}] ([{}])",
client->GetName(), client->CharacterID(),
guild_mgr.GetGuildName(GuildID()), GuildID());
//record the invite with guild manager so we know it's valid when we get the reply
guild_mgr.RecordInvite(client->CharacterID(), GuildID(), gc->officer);
if (gc->guildeqid == 0)
gc->guildeqid = GuildID();
// Convert Membership Level between RoF and previous clients.
if (client->ClientVersion() < EQ::versions::ClientVersion::RoF && ClientVersion() >= EQ::versions::ClientVersion::RoF)
{
gc->officer = 0;
}
if (client->ClientVersion() >= EQ::versions::ClientVersion::RoF && ClientVersion() < EQ::versions::ClientVersion::RoF)
{
gc->officer = 8;
}
LogGuilds("Sending OP_GuildInvite for invite to [{}], length [{}]", client->GetName(), app->size);
client->SetPendingGuildInvitation(true);
client->QueuePacket(app);
}
else {
//they are in some other guild
Message(Chat::Red, "Player is in a guild.");
return;
}
}
#ifdef BOTS
else if (invitee->IsBot()) {
// The guild system is too tightly coupled with the character_data table so we have to avoid using much of the system
Bot::ProcessGuildInvite(this, invitee->CastToBot());
return;
}
#endif
}
}
void Client::Handle_OP_GuildInviteAccept(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildInviteAccept");
SetPendingGuildInvitation(false);
if (app->size != sizeof(GuildInviteAccept_Struct)) {
std::cout << "Wrong size: OP_GuildInviteAccept, size=" << app->size << ", expected " << sizeof(GuildJoin_Struct) << std::endl;
return;
}
GuildInviteAccept_Struct* gj = (GuildInviteAccept_Struct*)app->pBuffer;
uint32 guildrank = gj->response;
if (ClientVersion() >= EQ::versions::ClientVersion::RoF)
{
if (gj->response > 9)
{
//don't care if the check fails (since we don't know the rank), just want to clear the entry.
guild_mgr.VerifyAndClearInvite(CharacterID(), gj->guildeqid, gj->response);
worldserver.SendEmoteMessage(gj->inviter, 0, 0, "%s has declined to join the guild.", this->GetName());
return;
}
}
if (gj->response == 5 || gj->response == 4) {
//don't care if the check fails (since we don't know the rank), just want to clear the entry.
guild_mgr.VerifyAndClearInvite(CharacterID(), gj->guildeqid, gj->response);
worldserver.SendEmoteMessage(gj->inviter, 0, 0, "%s has declined to join the guild.", this->GetName());
return;
}
//uint32 tmpeq = gj->guildeqid;
if (IsInAGuild() && gj->response == GuildRank())
Message(0, "Error: You're already in a guild!");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
LogGuilds("Guild Invite Accept: guild [{}], response [{}], inviter [{}], person [{}]",
gj->guildeqid, gj->response, gj->inviter, gj->newmember);
//ok, the invite is also used for changing rank as well.
Mob* inviter = entity_list.GetMob(gj->inviter);
if (inviter && inviter->IsClient())
{
Client* client = inviter->CastToClient();
// Convert Membership Level between RoF and previous clients.
if (client->ClientVersion() < EQ::versions::ClientVersion::RoF && ClientVersion() >= EQ::versions::ClientVersion::RoF)
{
guildrank = 0;
}
if (client->ClientVersion() >= EQ::versions::ClientVersion::RoF && ClientVersion() < EQ::versions::ClientVersion::RoF)
{
guildrank = 8;
}
}
//we don't really care a lot about what this packet means, as long as
//it has been authorized with the guild manager
if (!guild_mgr.VerifyAndClearInvite(CharacterID(), gj->guildeqid, guildrank)) {
worldserver.SendEmoteMessage(gj->inviter, 0, 0, "%s has sent an invalid response to your invite!", GetName());
Message(Chat::Red, "Invalid invite response packet!");
return;
}
if (gj->guildeqid == GuildID()) {
//only need to change rank.
LogGuilds("Changing guild rank of [{}] ([{}]) to rank [{}] in guild [{}] ([{}])",
GetName(), CharacterID(),
gj->response,
guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.SetGuildRank(CharacterID(), gj->response)) {
Message(Chat::Red, "There was an error during the rank change, DB may now be inconsistent.");
return;
}
}
else {
LogGuilds("Adding [{}] ([{}]) to guild [{}] ([{}]) at rank [{}]",
GetName(), CharacterID(),
guild_mgr.GetGuildName(gj->guildeqid), gj->guildeqid,
gj->response);
//change guild and rank
guildrank = gj->response;
if (ClientVersion() >= EQ::versions::ClientVersion::RoF)
{
if (gj->response == 8)
{
guildrank = 0;
}
}
if (!guild_mgr.SetGuild(CharacterID(), gj->guildeqid, guildrank)) {
Message(Chat::Red, "There was an error during the invite, DB may now be inconsistent.");
return;
}
if (zone->GetZoneID() == RuleI(World, GuildBankZoneID) && GuildBanks)
GuildBanks->SendGuildBank(this);
}
}
}
void Client::Handle_OP_GuildLeader(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildLeader");
if (app->size < 2) {
LogGuilds("Invalid length [{}] on OP_GuildLeader", app->size);
return;
}
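// Guarantee the variable-length payload is null terminated before reading the target name from it.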
app->pBuffer[app->size - 1] = 0;
GuildMakeLeader* gml = (GuildMakeLeader*)app->pBuffer;
if (!IsInAGuild())
Message(0, "Error: You arent in a guild!");
else if (GuildRank() != GUILD_LEADER)
Message(0, "Error: You arent the guild leader!");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
//NOTE: we could do cross-zone lookups here...
Client* newleader = entity_list.GetClientByName(gml->target);
if (newleader) {
LogGuilds("Transfering leadership of [{}] ([{}]) to [{}] ([{}])",
guild_mgr.GetGuildName(GuildID()), GuildID(),
newleader->GetName(), newleader->CharacterID());
if (guild_mgr.SetGuildLeader(GuildID(), newleader->CharacterID())) {
Message(0, "Successfully Transfered Leadership to %s.", gml->target);
newleader->Message(Chat::Yellow, "%s has transfered the guild leadership into your hands.", GetName());
}
else
Message(0, "Could not change leadership at this time.");
}
else
Message(0, "Failed to change leader, could not find target.");
}
// SendGuildMembers(GuildID(), true);
return;
}
void Client::Handle_OP_GuildManageBanker(const EQApplicationPacket *app)
{
LogGuilds("Got OP_GuildManageBanker of len [{}]", app->size);
if (app->size != sizeof(GuildManageBanker_Struct)) {
LogGuilds("Error: app size of [{}] != size of OP_GuildManageBanker of [{}]\n", app->size, sizeof(GuildManageBanker_Struct));
return;
}
GuildManageBanker_Struct* gmb = (GuildManageBanker_Struct*)app->pBuffer;
if (!IsInAGuild()) {
Message(Chat::Red, "Your not in a guild!");
return;
}
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(gmb->member, gci))
{
Message(0, "Unable to find '%s'", gmb->member);
return;
}
bool IsCurrentlyABanker = guild_mgr.GetBankerFlag(gci.char_id);
bool IsCurrentlyAnAlt = guild_mgr.GetAltFlag(gci.char_id);
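// The enabled field is a bitmask: bit 0 toggles the banker flag, bit 1 the alt flag.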
bool NewBankerStatus = gmb->enabled & 0x01;
bool NewAltStatus = gmb->enabled & 0x02;
if ((IsCurrentlyABanker != NewBankerStatus) && !guild_mgr.IsGuildLeader(GuildID(), CharacterID()))
{
Message(Chat::Red, "Only the guild leader can assign guild bankers!");
return;
}
if (IsCurrentlyAnAlt != NewAltStatus)
{
bool IsAllowed = !strncasecmp(GetName(), gmb->member, strlen(GetName())) || (GuildRank() >= GUILD_OFFICER);
if (!IsAllowed)
{
Message(Chat::Red, "You are not allowed to change the alt status of %s", gmb->member);
return;
}
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
if (IsCurrentlyABanker != NewBankerStatus)
{
if (!guild_mgr.SetBankerFlag(gci.char_id, NewBankerStatus)) {
Message(Chat::Red, "Error setting guild banker flag.");
return;
}
if (NewBankerStatus)
Message(0, "%s has been made a guild banker.", gmb->member);
else
Message(0, "%s is no longer a guild banker.", gmb->member);
}
if (IsCurrentlyAnAlt != NewAltStatus)
{
if (!guild_mgr.SetAltFlag(gci.char_id, NewAltStatus)) {
Message(Chat::Red, "Error setting guild alt flag.");
return;
}
if (NewAltStatus)
Message(0, "%s has been marked as an alt.", gmb->member);
else
Message(0, "%s is no longer marked as an alt.", gmb->member);
}
}
void Client::Handle_OP_GuildPeace(const EQApplicationPacket *app)
{
LogGuilds("Got OP_GuildPeace of len [{}]", app->size);
return;
}
void Client::Handle_OP_GuildPromote(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildPromote");
if (app->size != sizeof(GuildPromoteStruct)) {
LogGuilds("Error: app size of [{}] != size of GuildDemoteStruct of [{}]\n", app->size, sizeof(GuildPromoteStruct));
return;
}
if (!IsInAGuild())
Message(0, "Error: You arent in a guild!");
else if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_PROMOTE))
Message(0, "You dont have permission to invite.");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
GuildPromoteStruct* promote = (GuildPromoteStruct*)app->pBuffer;
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(promote->target, gci)) {
Message(0, "Unable to find '%s'", promote->target);
return;
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
uint8 rank = gci.rank + 1;
if (rank > GUILD_OFFICER)
{
Message(0, "You cannot promote someone to be guild leader. You must use /guildleader.");
return;
}
LogGuilds("Promoting [{}] ([{}]) from rank [{}] ([{}]) to [{}] ([{}]) in [{}] ([{}])",
promote->target, gci.char_id,
guild_mgr.GetRankName(GuildID(), gci.rank), gci.rank,
guild_mgr.GetRankName(GuildID(), rank), rank,
guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.SetGuildRank(gci.char_id, rank)) {
Message(Chat::Red, "Error while setting rank %d on '%s'.", rank, promote->target);
return;
}
Message(0, "Successfully promoted %s to rank %d", promote->target, rank);
}
return;
}
void Client::Handle_OP_GuildPublicNote(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildPublicNote");
if (app->size < sizeof(GuildUpdate_PublicNote)) {
// client calls for a motd on login even if they aren't in a guild
LogError("Error: app size of [{}] < size of OP_GuildPublicNote of [{}]", app->size, sizeof(GuildUpdate_PublicNote));
return;
}
GuildUpdate_PublicNote* gpn = (GuildUpdate_PublicNote*)app->pBuffer;
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(gpn->target, gci)) {
Message(0, "Unable to find '%s'", gpn->target);
return;
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
LogGuilds("Setting public note on [{}] ([{}]) in guild [{}] ([{}]) to: [{}]",
gpn->target, gci.char_id,
guild_mgr.GetGuildName(GuildID()), GuildID(),
gpn->note);
if (!guild_mgr.SetPublicNote(gci.char_id, gpn->note)) {
Message(Chat::Red, "Failed to set public note on %s", gpn->target);
}
else {
Message(0, "Successfully changed public note on %s", gpn->target);
}
// SendGuildMembers(GuildID(), true);
return;
}
void Client::Handle_OP_GuildRemove(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildRemove");
if (app->size != sizeof(GuildCommand_Struct)) {
std::cout << "Wrong size: OP_GuildRemove, size=" << app->size << ", expected " << sizeof(GuildCommand_Struct) << std::endl;
return;
}
GuildCommand_Struct* gc = (GuildCommand_Struct*)app->pBuffer;
if (!IsInAGuild())
Message(0, "Error: You arent in a guild!");
// we can always remove ourselves; otherwise, our rank needs remove permissions
else if (strcasecmp(gc->othername, GetName()) != 0 &&
!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_REMOVE))
Message(0, "You dont have permission to remove guild members.");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
#ifdef BOTS
if (Bot::ProcessGuildRemoval(this, gc->othername))
return;
#endif
uint32 char_id;
Client* client = entity_list.GetClientByName(gc->othername);
if (client) {
if (!client->IsInGuild(GuildID())) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
char_id = client->CharacterID();
LogGuilds("Removing [{}] ([{}]) from guild [{}] ([{}])",
client->GetName(), client->CharacterID(),
guild_mgr.GetGuildName(GuildID()), GuildID());
}
else {
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(gc->othername, gci)) {
Message(0, "Unable to find '%s'", gc->othername);
return;
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
char_id = gci.char_id;
LogGuilds("Removing remote/offline [{}] ([{}]) into guild [{}] ([{}])",
gci.char_name.c_str(), gci.char_id,
guild_mgr.GetGuildName(GuildID()), GuildID());
}
// SetGuild returns true on success, so only broadcast and announce the removal when the database update worked.
if (guild_mgr.SetGuild(char_id, GUILD_NONE, 0)) {
auto outapp = new EQApplicationPacket(OP_GuildManageRemove, sizeof(GuildManageRemove_Struct));
GuildManageRemove_Struct* gm = (GuildManageRemove_Struct*)outapp->pBuffer;
gm->guildeqid = GuildID();
strcpy(gm->member, gc->othername);
Message(0, "%s successfully removed from your guild.", gc->othername);
entity_list.QueueClientsGuild(this, outapp, false, GuildID());
safe_delete(outapp);
}
else
Message(0, "Unable to remove %s from your guild.", gc->othername);
}
// SendGuildMembers(GuildID(), true);
return;
}
void Client::Handle_OP_GuildStatus(const EQApplicationPacket *app)
{
if (app->size != sizeof(GuildStatus_Struct))
{
LogDebug("Size mismatch in OP_GuildStatus expected [{}] got [{}]", sizeof(GuildStatus_Struct), app->size);
DumpPacket(app);
return;
}
GuildStatus_Struct *gss = (GuildStatus_Struct*)app->pBuffer;
Client *c = entity_list.GetClientByName(gss->Name);
if (!c)
{
MessageString(Chat::LightGray, TARGET_PLAYER_FOR_GUILD_STATUS);
return;
}
uint32 TargetGuildID = c->GuildID();
if (TargetGuildID == GUILD_NONE)
{
MessageString(Chat::LightGray, NOT_IN_A_GUILD, c->GetName());
return;
}
const char *GuildName = guild_mgr.GetGuildName(TargetGuildID);
if (!GuildName)
return;
bool IsLeader = guild_mgr.CheckPermission(TargetGuildID, c->GuildRank(), GUILD_PROMOTE);
bool IsOfficer = guild_mgr.CheckPermission(TargetGuildID, c->GuildRank(), GUILD_INVITE);
if ((TargetGuildID == GuildID()) && (c != this))
{
if (IsLeader)
MessageString(Chat::LightGray, LEADER_OF_YOUR_GUILD, c->GetName());
else if (IsOfficer)
MessageString(Chat::LightGray, OFFICER_OF_YOUR_GUILD, c->GetName());
else
MessageString(Chat::LightGray, MEMBER_OF_YOUR_GUILD, c->GetName());
return;
}
if (IsLeader)
MessageString(Chat::LightGray, LEADER_OF_X_GUILD, c->GetName(), GuildName);
else if (IsOfficer)
MessageString(Chat::LightGray, OFFICER_OF_X_GUILD, c->GetName(), GuildName);
else
MessageString(Chat::LightGray, MEMBER_OF_X_GUILD, c->GetName(), GuildName);
}
void Client::Handle_OP_GuildUpdateURLAndChannel(const EQApplicationPacket *app)
{
if (app->size != sizeof(GuildUpdateURLAndChannel_Struct))
{
LogDebug("Size mismatch in OP_GuildUpdateURLAndChannel expected [{}] got [{}]", sizeof(GuildUpdateURLAndChannel_Struct), app->size);
DumpPacket(app);
return;
}
GuildUpdateURLAndChannel_Struct *guuacs = (GuildUpdateURLAndChannel_Struct*)app->pBuffer;
if (!IsInAGuild())
return;
if (!guild_mgr.IsGuildLeader(GuildID(), CharacterID()))
{
Message(Chat::Red, "Only the guild leader can change the Channel or URL.!");
return;
}
if (guuacs->Action == 0)
guild_mgr.SetGuildURL(GuildID(), guuacs->Text);
else
guild_mgr.SetGuildChannel(GuildID(), guuacs->Text);
}
void Client::Handle_OP_GuildWar(const EQApplicationPacket *app)
{
LogGuilds("Got OP_GuildWar of len [{}]", app->size);
return;
}
void Client::Handle_OP_Heartbeat(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_Hide(const EQApplicationPacket *app)
{
// newer clients respond to OP_CancelSneakHide with an OP_Hide of size 4 containing 0 data
if (app->size == 4) {
auto data = app->ReadUInt32(0);
if (data)
LogDebug("Got OP_Hide with unexpected data [{}]", data);
return;
}
if (!HasSkill(EQ::skills::SkillHide) && GetSkill(EQ::skills::SkillHide) == 0)
{
// A race can grant hide even when the skill cannot be trained, so only
// bail out when the character has no hide skill value at all.
return;
}
if (!p_timers.Expired(&database, pTimerHide, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
int reuse = HideReuseTime - GetSkillReuseTime(EQ::skills::SkillHide);
if (reuse < 1)
reuse = 1;
p_timers.Start(pTimerHide, reuse - 1);
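// 25% base chance plus skill/2.5 percent: at 250 skill the roll (0-100) can never fail.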
float hidechance = ((GetSkill(EQ::skills::SkillHide) / 250.0f) + .25) * 100;
float random = zone->random.Real(0, 100);
CheckIncreaseSkill(EQ::skills::SkillHide, nullptr, 5);
if (random < hidechance) {
auto outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct* sa_out = (SpawnAppearance_Struct*)outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x03;
sa_out->parameter = 1;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
if (spellbonuses.ShroudofStealth || aabonuses.ShroudofStealth || itembonuses.ShroudofStealth) {
improved_hidden = true;
hidden = true;
}
else
hidden = true;
tmHidden = Timer::GetCurrentTime();
}
if (GetClass() == ROGUE) {
auto outapp = new EQApplicationPacket(OP_SimpleMessage, sizeof(SimpleMessage_Struct));
SimpleMessage_Struct *msg = (SimpleMessage_Struct *)outapp->pBuffer;
msg->color = 0x010E;
Mob *evadetar = GetTarget();
if (!auto_attack && (evadetar && evadetar->CheckAggro(this)
&& evadetar->IsNPC())) {
if (zone->random.Int(0, 260) < (int)GetSkill(EQ::skills::SkillHide)) {
msg->string_id = EVADE_SUCCESS;
RogueEvade(evadetar);
}
else {
msg->string_id = EVADE_FAIL;
}
}
else {
if (hidden) {
msg->string_id = HIDE_SUCCESS;
}
else {
msg->string_id = HIDE_FAIL;
}
}
FastQueuePacket(&outapp);
}
return;
}
void Client::Handle_OP_HideCorpse(const EQApplicationPacket *app)
{
// New OPCode for SOD+ as /hidecorpse is handled serverside now.
//
if (app->size != sizeof(HideCorpse_Struct))
{
LogDebug("Size mismatch in OP_HideCorpse expected [{}] got [{}]", sizeof(HideCorpse_Struct), app->size);
DumpPacket(app);
return;
}
HideCorpse_Struct *hcs = (HideCorpse_Struct*)app->pBuffer;
if (hcs->Action == HideCorpseLooted)
return;
if ((HideCorpseMode == HideCorpseNone) && (hcs->Action == HideCorpseNone))
return;
entity_list.HideCorpses(this, HideCorpseMode, hcs->Action);
HideCorpseMode = hcs->Action;
}
void Client::Handle_OP_Ignore(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_Illusion(const EQApplicationPacket *app)
{
if (app->size != sizeof(Illusion_Struct)) {
LogError("Received invalid sized OP_Illusion: got [{}], expected [{}]", app->size, sizeof(Illusion_Struct));
DumpPacket(app);
return;
}
if (!GetGM())
{
database.SetMQDetectionFlag(this->AccountName(), this->GetName(), "OP_Illusion sent by non Game Master.", zone->GetShortName());
return;
}
Illusion_Struct* bnpc = (Illusion_Struct*)app->pBuffer;
//these need to be implemented
/*
texture = bnpc->texture;
helmtexture = bnpc->helmtexture;
luclinface = bnpc->luclinface;
*/
race = bnpc->race;
size = 0;
entity_list.QueueClients(this, app);
return;
}
void Client::Handle_OP_InspectAnswer(const EQApplicationPacket *app)
{
if (app->size != sizeof(InspectResponse_Struct)) {
LogError("Wrong size: OP_InspectAnswer, size=[{}], expected [{}]", app->size, sizeof(InspectResponse_Struct));
return;
}
//Fills the app sent from client.
EQApplicationPacket* outapp = app->Copy();
InspectResponse_Struct* insr = (InspectResponse_Struct*)outapp->pBuffer;
Mob* tmp = entity_list.GetMob(insr->TargetID);
const EQ::ItemData* item = nullptr;
int ornamentationAugtype = RuleI(Character, OrnamentationAugmentType);
for (int16 L = EQ::invslot::EQUIPMENT_BEGIN; L <= EQ::invslot::EQUIPMENT_END; L++) {
const EQ::ItemInstance* inst = GetInv().GetItem(L);
item = inst ? inst->GetItem() : nullptr;
if (item) {
strcpy(insr->itemnames[L], item->Name);
if (inst && inst->GetOrnamentationAug(ornamentationAugtype)) {
const EQ::ItemData *aug_item = inst->GetOrnamentationAug(ornamentationAugtype)->GetItem();
insr->itemicons[L] = aug_item->Icon;
}
else if (inst->GetOrnamentationIcon()) {
insr->itemicons[L] = inst->GetOrnamentationIcon();
}
else {
insr->itemicons[L] = item->Icon;
}
}
else { insr->itemicons[L] = 0xFFFFFFFF; }
}
InspectMessage_Struct* newmessage = (InspectMessage_Struct*)insr->text;
InspectMessage_Struct& playermessage = this->GetInspectMessage();
memcpy(&playermessage, newmessage, sizeof(InspectMessage_Struct));
database.SaveCharacterInspectMessage(this->CharacterID(), &playermessage);
if (tmp != 0 && tmp->IsClient()) { tmp->CastToClient()->QueuePacket(outapp); } // Send answer to requester
return;
}
void Client::Handle_OP_InspectMessageUpdate(const EQApplicationPacket *app)
{
if (app->size != sizeof(InspectMessage_Struct)) {
LogError("Wrong size: OP_InspectMessageUpdate, size=[{}], expected [{}]", app->size, sizeof(InspectMessage_Struct));
return;
}
InspectMessage_Struct* newmessage = (InspectMessage_Struct*)app->pBuffer;
InspectMessage_Struct& playermessage = this->GetInspectMessage();
memcpy(&playermessage, newmessage, sizeof(InspectMessage_Struct));
database.SaveCharacterInspectMessage(this->CharacterID(), &playermessage);
}
void Client::Handle_OP_InspectRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(Inspect_Struct)) {
LogError("Wrong size: OP_InspectRequest, size=[{}], expected [{}]", app->size, sizeof(Inspect_Struct));
return;
}
Inspect_Struct* ins = (Inspect_Struct*)app->pBuffer;
Mob* tmp = entity_list.GetMob(ins->TargetID);
if (tmp != 0 && tmp->IsClient()) {
if (tmp->CastToClient()->ClientVersion() < EQ::versions::ClientVersion::SoF) { tmp->CastToClient()->QueuePacket(app); } // Send request to target
// Inspecting an SoF or later client will make the server handle the request
else { ProcessInspectRequest(tmp->CastToClient(), this); }
}
#ifdef BOTS
if (tmp != 0 && tmp->IsBot()) { Bot::ProcessBotInspectionRequest(tmp->CastToBot(), this); }
#endif
return;
}
void Client::Handle_OP_InstillDoubt(const EQApplicationPacket *app)
{
//packet is empty as of 12/14/04
if (!p_timers.Expired(&database, pTimerInstillDoubt, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerInstillDoubt, InstillDoubtReuseTime - 1);
InstillDoubt(GetTarget());
return;
}
void Client::Handle_OP_ItemLinkClick(const EQApplicationPacket *app)
{
if (app->size != sizeof(ItemViewRequest_Struct)) {
LogError("Wrong size on OP_ItemLinkClick. Got: [{}], Expected: [{}]", app->size,
sizeof(ItemViewRequest_Struct));
DumpPacket(app);
return;
}
ItemViewRequest_Struct *ivrs = (ItemViewRequest_Struct *)app->pBuffer;
// todo: verify ivrs->link_hash based on a rule, in case we don't care about people being able to sniff data
// from the item DB
const EQ::ItemData *item = database.GetItem(ivrs->item_id);
if (!item) {
if (ivrs->item_id != SAYLINK_ITEM_ID) {
Message(Chat::Red, "Error: The item for the link you have clicked on does not exist!");
return;
}
// This new scheme will shuttle the ID in the first augment for non-silent links
// and the second augment for silent.
std::string response = "";
bool silentsaylink = ivrs->augments[1] > 0;
int sayid = silentsaylink ? ivrs->augments[1] : ivrs->augments[0];
if (sayid > 0) {
std::string query = StringFormat("SELECT `phrase` FROM saylink WHERE `id` = '%i'", sayid);
auto results = database.QueryDatabase(query);
if (!results.Success()) {
Message(Chat::Red, "Error: The saylink (%s) was not found in the database.", response.c_str());
return;
}
if (results.RowCount() != 1) {
Message(Chat::Red, "Error: The saylink (%s) was not found in the database.", response.c_str());
return;
}
auto row = results.begin();
response = row[0];
}
if (!response.empty()) {
if (!mod_saylink(response, silentsaylink)) {
return;
}
if (GetTarget() && GetTarget()->IsNPC()) {
if (silentsaylink) {
parse->EventNPC(EVENT_SAY, GetTarget()->CastToNPC(), this, response.c_str(), 0);
if (response[0] == '#' && parse->PlayerHasQuestSub(EVENT_COMMAND)) {
parse->EventPlayer(EVENT_COMMAND, this, response.c_str(), 0);
}
#ifdef BOTS
else if (response[0] == '^' && parse->PlayerHasQuestSub(EVENT_BOT_COMMAND)) {
parse->EventPlayer(EVENT_BOT_COMMAND, this, response.c_str(), 0);
}
#endif
else {
parse->EventPlayer(EVENT_SAY, this, response.c_str(), 0);
}
}
else {
Message(Chat::LightGray, "You say, '%s'", response.c_str());
ChannelMessageReceived(8, 0, 100, response.c_str());
}
return;
}
else {
if (silentsaylink) {
if (response[0] == '#' && parse->PlayerHasQuestSub(EVENT_COMMAND)) {
parse->EventPlayer(EVENT_COMMAND, this, response.c_str(), 0);
}
#ifdef BOTS
else if (response[0] == '^' && parse->PlayerHasQuestSub(EVENT_BOT_COMMAND)) {
parse->EventPlayer(EVENT_BOT_COMMAND, this, response.c_str(), 0);
}
#endif
else {
parse->EventPlayer(EVENT_SAY, this, response.c_str(), 0);
}
}
else {
Message(Chat::LightGray, "You say, '%s'", response.c_str());
ChannelMessageReceived(8, 0, 100, response.c_str());
}
return;
}
}
else {
Message(Chat::Red, "Error: Say Link not found or is too long.");
return;
}
}
EQ::ItemInstance *inst =
database.CreateItem(item, item->MaxCharges, ivrs->augments[0], ivrs->augments[1], ivrs->augments[2],
ivrs->augments[3], ivrs->augments[4], ivrs->augments[5]);
if (inst) {
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
return;
}
void Client::Handle_OP_ItemLinkResponse(const EQApplicationPacket *app)
{
if (app->size != sizeof(LDONItemViewRequest_Struct)) {
LogError("OP size error: OP_ItemLinkResponse expected:[{}] got:[{}]", sizeof(LDONItemViewRequest_Struct), app->size);
return;
}
LDONItemViewRequest_Struct* item = (LDONItemViewRequest_Struct*)app->pBuffer;
EQ::ItemInstance* inst = database.CreateItem(item->item_id);
if (inst) {
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
return;
}
void Client::Handle_OP_ItemName(const EQApplicationPacket *app)
{
if (app->size != sizeof(ItemNamePacket_Struct)) {
LogError("Invalid size for ItemNamePacket_Struct: Expected: [{}], Got: [{}]",
sizeof(ItemNamePacket_Struct), app->size);
return;
}
ItemNamePacket_Struct *p = (ItemNamePacket_Struct*)app->pBuffer;
const EQ::ItemData *item = nullptr;
if ((item = database.GetItem(p->item_id)) != nullptr) {
auto outapp = new EQApplicationPacket(OP_ItemName, sizeof(ItemNamePacket_Struct));
p = (ItemNamePacket_Struct*)outapp->pBuffer;
memset(p, 0, sizeof(ItemNamePacket_Struct));
strcpy(p->name, item->Name);
FastQueuePacket(&outapp);
}
return;
}
void Client::Handle_OP_ItemPreview(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_ItemPreview, app, ItemPreview_Struct);
ItemPreview_Struct *ips = (ItemPreview_Struct *)app->pBuffer;
const EQ::ItemData* item = database.GetItem(ips->itemid);
if (item) {
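		// The fixed values and padding runs below are a hand-assembled preview payload,
		// reverse-engineered from packet captures (and lucy data); many fields remain unknown.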
auto outapp = new EQApplicationPacket(OP_ItemPreview, strlen(item->Name) + strlen(item->Lore) +
strlen(item->IDFile) + 898);
int spacer;
for (spacer = 0; spacer < 16; spacer++) {
outapp->WriteUInt8(48);
}
outapp->WriteUInt16(256);
for (spacer = 0; spacer < 7; spacer++) {
outapp->WriteUInt8(0);
}
for (spacer = 0; spacer < 7; spacer++) {
outapp->WriteUInt8(255);
}
outapp->WriteUInt32(0);
outapp->WriteUInt32(1);
outapp->WriteUInt32(0);
outapp->WriteUInt8(237); // Seems to be some kind of counter? increases by 1 for each preview that you do.
		outapp->WriteUInt16(2041); // 0x07F9 (wire bytes F9 07)
for (spacer = 0; spacer < 36; spacer++) {
outapp->WriteUInt8(0);
}
for (spacer = 0; spacer < 4; spacer++) {
outapp->WriteUInt8(255);
}
for (spacer = 0; spacer < 9; spacer++) {
outapp->WriteUInt8(0);
}
for (spacer = 0; spacer < 5; spacer++) {
outapp->WriteUInt8(255);
}
for (spacer = 0; spacer < 5; spacer++) {
outapp->WriteUInt8(0);
}
outapp->WriteString(item->Name);
outapp->WriteString(item->Lore);
outapp->WriteUInt8(0);
outapp->WriteUInt32(ips->itemid);
outapp->WriteUInt32(item->Weight);
outapp->WriteUInt8(item->NoRent);
outapp->WriteUInt8(item->NoDrop);
outapp->WriteUInt8(item->Attuneable);
outapp->WriteUInt8(item->Size);
outapp->WriteUInt32(item->Slots);
outapp->WriteUInt32(item->Price);
outapp->WriteUInt32(item->Icon);
outapp->WriteUInt8(0); //Unknown?
outapp->WriteUInt8(0); //Placeable flag?
outapp->WriteUInt32(item->BenefitFlag);
outapp->WriteUInt8(item->Tradeskills);
outapp->WriteUInt8(item->CR);
outapp->WriteUInt8(item->DR);
outapp->WriteUInt8(item->PR);
outapp->WriteUInt8(item->MR);
outapp->WriteUInt8(item->FR);
outapp->WriteUInt8(item->AStr);
outapp->WriteUInt8(item->ASta);
outapp->WriteUInt8(item->AAgi);
outapp->WriteUInt8(item->ADex);
outapp->WriteUInt8(item->ACha);
outapp->WriteUInt8(item->AInt);
outapp->WriteUInt8(item->AWis);
outapp->WriteSInt32(item->HP);
outapp->WriteSInt32(item->Mana);
outapp->WriteSInt32(item->Endur);
outapp->WriteSInt32(item->AC);
outapp->WriteUInt32(item->Regen);
outapp->WriteUInt32(item->ManaRegen);
outapp->WriteSInt32(item->EnduranceRegen);
outapp->WriteUInt32(item->Classes);
outapp->WriteUInt32(item->Races);
outapp->WriteUInt32(item->Deity);
outapp->WriteUInt32(item->SkillModValue);
		outapp->WriteUInt32(0); // unknown (second skill-mod field)
outapp->WriteUInt32(item->SkillModType);
outapp->WriteUInt32(0); //SkillModExtra
outapp->WriteUInt32(item->BaneDmgRace);
outapp->WriteUInt32(item->BaneDmgBody);
outapp->WriteUInt32(item->BaneDmgRaceAmt);
outapp->WriteUInt32(item->BaneDmgAmt);
outapp->WriteUInt8(item->Magic);
outapp->WriteUInt32(item->CastTime_);
outapp->WriteUInt32(item->ReqLevel);
outapp->WriteUInt32(item->RecLevel);
outapp->WriteUInt32(item->RecSkill);
outapp->WriteUInt32(item->BardType);
outapp->WriteUInt32(item->BardValue);
outapp->WriteUInt8(item->Light);
outapp->WriteUInt8(item->Delay);
outapp->WriteUInt8(item->ElemDmgType);
outapp->WriteUInt8(item->ElemDmgAmt);
outapp->WriteUInt8(item->Range);
outapp->WriteUInt32(item->Damage);
outapp->WriteUInt32(item->Color);
outapp->WriteUInt32(0); // Prestige
outapp->WriteUInt8(item->ItemType);
outapp->WriteUInt32(item->Material);
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt32(item->EliteMaterial);
outapp->WriteUInt32(item->HerosForgeModel);
outapp->WriteUInt32(0); // unknown
outapp->WriteUInt32(0); //This is unknown057 from lucy
for (spacer = 0; spacer < 77; spacer++) { //More Item stats, but some seem to be off based on packet check
outapp->WriteUInt8(0);
}
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
outapp->WriteUInt32(0); //Unknown
for (spacer = 0; spacer < 6; spacer++) { //Augment stuff
outapp->WriteUInt32(item->AugSlotType[spacer]);
outapp->WriteUInt8(item->AugSlotVisible[spacer]);
outapp->WriteUInt8(item->AugSlotUnk2[spacer]);
}
outapp->WriteUInt32(0); //New RoF 6th Aug Slot
outapp->WriteUInt8(1); //^
outapp->WriteUInt8(0); //^^
outapp->WriteUInt32(item->LDoNSold);
outapp->WriteUInt32(item->LDoNTheme);
outapp->WriteUInt32(item->LDoNPrice);
outapp->WriteUInt32(item->LDoNSellBackRate);
for (spacer = 0; spacer < 11; spacer++) { //unknowns
outapp->WriteUInt8(0);
}
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
outapp->WriteUInt16(0); //Unknown
outapp->WriteUInt32(item->Favor); // Tribute
for (spacer = 0; spacer < 17; spacer++) { //unknowns
outapp->WriteUInt8(0);
}
outapp->WriteUInt32(item->GuildFavor); // Tribute
outapp->WriteUInt32(0); //Unknown
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
for (spacer = 0; spacer < 11; spacer++) { //unknowns
outapp->WriteUInt8(0);
}
outapp->WriteUInt8(1);
for (spacer = 0; spacer < 25; spacer++) { //unknowns
outapp->WriteUInt8(0);
}
for (spacer = 0; spacer < 304; spacer++) { //Cast stuff and whole bunch of unknowns
outapp->WriteUInt8(0);
}
		outapp->WriteUInt8(142); // 0x8E; always observed, though not part of the item structure
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt32(1); // Always seen as 1
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt32(0xCDCCCC3D); // Unknown
outapp->WriteUInt32(0);
		outapp->WriteUInt16(8256); // 0x2040 (wire bytes 40 20)
outapp->WriteUInt16(0);
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
outapp->WriteUInt16(0);
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt16(0); //unknown
		outapp->WriteUInt32(32831); // 0x803F (wire bytes 3F 80 00 00)
for (spacer = 0; spacer < 24; spacer++) { //whole bunch of unknowns always 0's
outapp->WriteUInt8(0);
}
outapp->WriteUInt8(1);
for (spacer = 0; spacer < 6; spacer++) { //whole bunch of unknowns always 0's
outapp->WriteUInt8(0);
}
QueuePacket(outapp);
safe_delete(outapp);
	}
}
void Client::Handle_OP_ItemVerifyRequest(const EQApplicationPacket *app)
{
using EQ::spells::CastingSlot;
if (app->size != sizeof(ItemVerifyRequest_Struct))
{
LogError("OP size error: OP_ItemVerifyRequest expected:[{}] got:[{}]", sizeof(ItemVerifyRequest_Struct), app->size);
return;
}
ItemVerifyRequest_Struct* request = (ItemVerifyRequest_Struct*)app->pBuffer;
	int32 slot_id = request->slot;
	int32 target_id = request->target;
	int32 spell_id = 0;
cheat_manager.ProcessItemVerifyRequest(request->slot, request->target);
	auto outapp = new EQApplicationPacket(OP_ItemVerifyReply, sizeof(ItemVerifyReply_Struct));
ItemVerifyReply_Struct* reply = (ItemVerifyReply_Struct*)outapp->pBuffer;
reply->slot = slot_id;
reply->target = target_id;
QueuePacket(outapp);
safe_delete(outapp);
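	// The client appears to expect an ItemVerifyReply for every request, so the reply is
	// queued above before any of the validation below can bail out.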
if (IsAIControlled()) {
this->MessageString(Chat::Red, NOT_IN_CONTROL);
return;
}
if (slot_id < 0) {
LogDebug("Unknown slot being used by [{}], slot being used is: [{}]", GetName(), request->slot);
return;
}
const EQ::ItemInstance* inst = m_inv[slot_id];
if (!inst) {
Message(0, "Error: item not found in inventory slot #%i", slot_id);
DeleteItemInInventory(slot_id, 0, true);
return;
}
const EQ::ItemData* item = inst->GetItem();
if (!item) {
Message(0, "Error: item not found in inventory slot #%i", slot_id);
DeleteItemInInventory(slot_id, 0, true);
return;
}
spell_id = item->Click.Effect;
if
(
spell_id > 0 &&
(
!IsValidSpell(spell_id) ||
casting_spell_id ||
delaytimer ||
spellend_timer.Enabled() ||
IsStunned() ||
IsFeared() ||
IsMezzed() ||
DivineAura() ||
(spells[spell_id].targettype == ST_Ring) ||
(IsSilenced() && !IsDiscipline(spell_id)) ||
(IsAmnesiad() && IsDiscipline(spell_id)) ||
(IsDetrimentalSpell(spell_id) && !zone->CanDoCombat()) ||
			(inst->IsScaling() && inst->GetExp() <= 0) // scaling items (e.g. charms) have no usable click effect at or below 0 experience
)
)
{
SendSpellBarEnable(spell_id);
return;
}
// Modern clients don't require pet targeted for item clicks that are ST_Pet
if (spell_id > 0 && (spells[spell_id].targettype == ST_Pet || spells[spell_id].targettype == ST_SummonedPet))
target_id = GetPetID();
LogDebug("OP ItemVerifyRequest: spell=[{}], target=[{}], inv=[{}]", spell_id, target_id, slot_id);
if (m_inv.SupportsClickCasting(slot_id) || ((item->ItemType == EQ::item::ItemTypePotion || item->PotionBelt) && m_inv.SupportsPotionBeltCasting(slot_id))) // sanity check
{
EQ::ItemInstance* p_inst = (EQ::ItemInstance*)inst;
parse->EventItem(EVENT_ITEM_CLICK, this, p_inst, nullptr, "", slot_id);
inst = m_inv[slot_id];
if (!inst)
{
return;
}
int r;
bool tryaug = false;
EQ::ItemInstance* clickaug = nullptr;
EQ::ItemData* augitem = nullptr;
for (r = EQ::invaug::SOCKET_BEGIN; r <= EQ::invaug::SOCKET_END; r++) {
const EQ::ItemInstance* aug_i = inst->GetAugment(r);
if (!aug_i)
continue;
const EQ::ItemData* aug = aug_i->GetItem();
if (!aug)
continue;
if ((aug->Click.Type == EQ::item::ItemEffectClick) || (aug->Click.Type == EQ::item::ItemEffectExpendable) || (aug->Click.Type == EQ::item::ItemEffectEquipClick) || (aug->Click.Type == EQ::item::ItemEffectClick2))
{
tryaug = true;
clickaug = (EQ::ItemInstance*)aug_i;
augitem = (EQ::ItemData*)aug;
spell_id = aug->Click.Effect;
break;
}
}
if ((spell_id <= 0) && (item->ItemType != EQ::item::ItemTypeFood && item->ItemType != EQ::item::ItemTypeDrink && item->ItemType != EQ::item::ItemTypeAlcohol && item->ItemType != EQ::item::ItemTypeSpell))
{
LogDebug("Item with no effect right clicked by [{}]", GetName());
}
else if (inst->IsClassCommon())
{
if (!RuleB(Skills, RequireTomeHandin) && item->ItemType == EQ::item::ItemTypeSpell && (strstr((const char*)item->Name, "Tome of ") || strstr((const char*)item->Name, "Skill: ")))
{
DeleteItemInInventory(slot_id, 1, true);
TrainDiscipline(item->ID);
}
else if (item->ItemType == EQ::item::ItemTypeSpell)
{
if (RuleB(Spells, AllowSpellMemorizeFromItem)) {
DeleteItemInInventory(slot_id, 1, true);
MemorizeSpellFromItem(item->ID);
} else {
return;
}
}
else if ((item->Click.Type == EQ::item::ItemEffectClick) || (item->Click.Type == EQ::item::ItemEffectExpendable) || (item->Click.Type == EQ::item::ItemEffectEquipClick) || (item->Click.Type == EQ::item::ItemEffectClick2))
{
if (inst->GetCharges() == 0)
{
//Message(0, "This item is out of charges.");
MessageString(Chat::Red, ITEM_OUT_OF_CHARGES);
return;
}
if (GetLevel() >= item->Click.Level2)
{
int i = parse->EventItem(EVENT_ITEM_CLICK_CAST, this, p_inst, nullptr, "", slot_id);
inst = m_inv[slot_id];
if (!inst)
{
return;
}
if (i == 0) {
if (!IsCastWhileInvis(item->Click.Effect))
CommonBreakInvisible(); // client can't do this for us :(
CastSpell(item->Click.Effect, target_id, CastingSlot::Item, item->CastTime, 0, 0, slot_id);
}
}
else
{
MessageString(Chat::Red, ITEMS_INSUFFICIENT_LEVEL);
return;
}
}
else if (tryaug)
{
if (clickaug->GetCharges() == 0)
{
//Message(0, "This item is out of charges.");
MessageString(Chat::Red, ITEM_OUT_OF_CHARGES);
return;
}
if (GetLevel() >= augitem->Click.Level2)
{
int i = parse->EventItem(EVENT_ITEM_CLICK_CAST, this, clickaug, nullptr, "", slot_id);
inst = m_inv[slot_id];
if (!inst)
{
return;
}
if (i == 0) {
if (!IsCastWhileInvis(augitem->Click.Effect))
CommonBreakInvisible(); // client can't do this for us :(
CastSpell(augitem->Click.Effect, target_id, CastingSlot::Item, augitem->CastTime, 0, 0, slot_id);
}
}
else
{
MessageString(Chat::Red, ITEMS_INSUFFICIENT_LEVEL);
return;
}
}
else
{
if (ClientVersion() >= EQ::versions::ClientVersion::SoD && !inst->IsEquipable(GetBaseRace(), GetClass()))
{
if (item->ItemType != EQ::item::ItemTypeFood && item->ItemType != EQ::item::ItemTypeDrink && item->ItemType != EQ::item::ItemTypeAlcohol)
{
LogDebug("Error: unknown item->Click.Type ([{}])", item->Click.Type);
}
else
{
/*
//This is food/drink - consume it
if (item->ItemType == EQ::item::ItemTypeFood && m_pp.hunger_level < 5000)
{
Consume(item, item->ItemType, slot_id, false);
}
else if (item->ItemType == EQ::item::ItemTypeDrink && m_pp.thirst_level < 5000)
{
Consume(item, item->ItemType, slot_id, false);
}
else if (item->ItemType == EQ::item::ItemTypeAlcohol)
{
#if EQDEBUG >= 1
LogDebug("Drinking Alcohol from slot:[{}]", slot_id);
#endif
// This Seems to be handled in OP_DeleteItem handling
//DeleteItemInInventory(slot_id, 1, false);
//entity_list.MessageCloseString(this, true, 50, 0, DRINKING_MESSAGE, GetName(), item->Name);
//Should add intoxication level to the PP at some point
//CheckIncreaseSkill(ALCOHOL_TOLERANCE, nullptr, 25);
}
EQApplicationPacket *outapp2 = nullptr;
outapp2 = new EQApplicationPacket(OP_Stamina, sizeof(Stamina_Struct));
Stamina_Struct* sta = (Stamina_Struct*)outapp2->pBuffer;
if (m_pp.hunger_level > 6000)
sta->food = 6000;
if (m_pp.thirst_level > 6000)
sta->water = 6000;
sta->food = m_pp.hunger_level;
sta->water = m_pp.thirst_level;
QueuePacket(outapp2);
safe_delete(outapp2);
*/
}
}
else
{
LogDebug("Error: unknown item->Click.Type ([{}])", item->Click.Type);
}
}
}
else
{
Message(0, "Error: item not found in inventory slot #%i", slot_id);
}
}
else
{
Message(0, "Error: Invalid inventory slot for using effects (inventory slot #%i)", slot_id);
}
return;
}
void Client::Handle_OP_Jump(const EQApplicationPacket *app)
{
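	// Jumping costs endurance: roughly 2.25 per level below level 20 (225 * level / 100,
	// integer math), and a flat 50 from level 20 onward.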
SetEndurance(GetEndurance() - (GetLevel()<20 ? (225 * GetLevel() / 100) : 50));
return;
}
void Client::Handle_OP_KeyRing(const EQApplicationPacket *app)
{
KeyRingList();
}
void Client::Handle_OP_KickPlayers(const EQApplicationPacket *app)
{
auto buf = reinterpret_cast<KickPlayers_Struct*>(app->pBuffer);
if (buf->kick_expedition)
{
auto expedition = GetExpedition();
if (expedition)
{
expedition->DzKickPlayers(this);
}
}
else if (buf->kick_task && GetTaskState() && GetTaskState()->HasActiveSharedTask())
{
GetTaskState()->KickPlayersSharedTask(this);
}
}
void Client::Handle_OP_LDoNButton(const EQApplicationPacket *app)
{
if (app->size < sizeof(bool))
{
return;
}
if (GetPendingAdventureCreate())
{
return;
}
if (IsOnAdventure())
{
return;
}
	// read the flag as a raw byte; casting arbitrary wire data to bool is not safe
	if (app->pBuffer[0] != 0)
{
auto pack =
new ServerPacket(ServerOP_AdventureRequestCreate,
sizeof(ServerAdventureRequestCreate_Struct) + (64 * adv_requested_member_count));
ServerAdventureRequestCreate_Struct *sac = (ServerAdventureRequestCreate_Struct*)pack->pBuffer;
strcpy(sac->leader, GetName());
sac->id = adv_requested_id;
sac->theme = adv_requested_theme;
sac->member_count = adv_requested_member_count;
memcpy((pack->pBuffer + sizeof(ServerAdventureRequestCreate_Struct)), adv_requested_data, (64 * adv_requested_member_count));
worldserver.SendPacket(pack);
delete pack;
PendingAdventureCreate();
ClearPendingAdventureData();
}
else
{
ClearPendingAdventureData();
}
}
void Client::Handle_OP_LDoNDisarmTraps(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
	if (target && target->IsNPC())
{
if (HasSkill(EQ::skills::SkillDisarmTraps))
{
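			// Assumption: LDoNTrapDistanceUse holds a squared distance, since it is compared
			// against DistanceSquaredNoZ() without being squared here.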
if (DistanceSquaredNoZ(m_Position, target->GetPosition()) > RuleI(Adventure, LDoNTrapDistanceUse))
{
Message(Chat::Red, "%s is too far away.", target->GetCleanName());
return;
}
HandleLDoNDisarm(target->CastToNPC(), GetSkill(EQ::skills::SkillDisarmTraps), LDoNTypeMechanical);
}
else
Message(Chat::Red, "You do not have the disarm trap skill.");
}
}
void Client::Handle_OP_LDoNInspect(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
if (target && target->GetClass() == LDON_TREASURE)
Message(Chat::Yellow, "%s", target->GetCleanName());
}
void Client::Handle_OP_LDoNOpen(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
if (target && target->IsNPC())
HandleLDoNOpen(target->CastToNPC());
}
void Client::Handle_OP_LDoNPickLock(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
	if (target && target->IsNPC())
{
if (HasSkill(EQ::skills::SkillPickLock))
{
if (DistanceSquaredNoZ(m_Position, target->GetPosition()) > RuleI(Adventure, LDoNTrapDistanceUse))
{
Message(Chat::Red, "%s is too far away.", target->GetCleanName());
return;
}
HandleLDoNPickLock(target->CastToNPC(), GetSkill(EQ::skills::SkillPickLock), LDoNTypeMechanical);
}
else
Message(Chat::Red, "You do not have the pick locks skill.");
}
}
void Client::Handle_OP_LDoNSenseTraps(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
	if (target && target->IsNPC())
{
if (HasSkill(EQ::skills::SkillSenseTraps))
{
if (DistanceSquaredNoZ(m_Position, target->GetPosition()) > RuleI(Adventure, LDoNTrapDistanceUse))
{
Message(Chat::Red, "%s is too far away.", target->GetCleanName());
return;
}
HandleLDoNSenseTraps(target->CastToNPC(), GetSkill(EQ::skills::SkillSenseTraps), LDoNTypeMechanical);
}
else
Message(Chat::Red, "You do not have the sense traps skill.");
}
}
void Client::Handle_OP_LeadershipExpToggle(const EQApplicationPacket *app)
{
if (app->size != 1) {
LogDebug("Size mismatch in OP_LeadershipExpToggle expected [{}] got [{}]", 1, app->size);
DumpPacket(app);
return;
}
uint8 *mode = (uint8 *)app->pBuffer;
if (*mode) {
m_pp.leadAAActive = 1;
Save();
MessageString(Chat::Yellow, LEADERSHIP_EXP_ON);
}
else {
m_pp.leadAAActive = 0;
Save();
MessageString(Chat::Yellow, LEADERSHIP_EXP_OFF);
}
}
void Client::Handle_OP_LeaveAdventure(const EQApplicationPacket *app)
{
if (!IsOnAdventure())
{
return;
}
LeaveAdventure();
}
void Client::Handle_OP_LeaveBoat(const EQApplicationPacket *app)
{
Mob* boat = entity_list.GetMob(this->controlling_boat_id); // find the mob corresponding to the boat id
if (boat) {
if ((boat->GetTarget() == this) && boat->GetHateAmount(this) == 0) { // if the client somehow left while still controlling the boat (and the boat isn't attacking them)
boat->SetTarget(nullptr); // fix it to stop later problems
}
}
this->controlling_boat_id = 0;
return;
}
void Client::Handle_OP_LFGCommand(const EQApplicationPacket *app)
{
if (app->size != sizeof(LFG_Struct)) {
		LogError("Wrong size: OP_LFGCommand, size=[{}], expected [{}]", app->size, sizeof(LFG_Struct));
DumpPacket(app);
return;
}
// Process incoming packet
LFG_Struct* lfg = (LFG_Struct*)app->pBuffer;
switch (lfg->value & 0xFF) {
case 0:
if (LFG) {
database.SetLFG(CharacterID(), false);
LFG = false;
LFGComments[0] = '\0';
}
break;
case 1:
if (!LFG) {
LFG = true;
database.SetLFG(CharacterID(), true);
}
LFGFromLevel = lfg->FromLevel;
LFGToLevel = lfg->ToLevel;
LFGMatchFilter = lfg->MatchFilter;
strn0cpy(LFGComments, lfg->Comments, sizeof(LFGComments));
break;
default:
Message(0, "Error: unknown LFG value %i", lfg->value);
}
UpdateWho();
// Issue outgoing packet to notify other clients
auto outapp = new EQApplicationPacket(OP_LFGAppearance, sizeof(LFG_Appearance_Struct));
LFG_Appearance_Struct* lfga = (LFG_Appearance_Struct*)outapp->pBuffer;
lfga->spawn_id = this->GetID();
lfga->lfg = (uint8)LFG;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
return;
}
void Client::Handle_OP_LFGGetMatchesRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(LFGGetMatchesRequest_Struct)) {
LogError("Wrong size: OP_LFGGetMatchesRequest, size=[{}], expected [{}]", app->size, sizeof(LFGGetMatchesRequest_Struct));
DumpPacket(app);
return;
}
LFGGetMatchesRequest_Struct* gmrs = (LFGGetMatchesRequest_Struct*)app->pBuffer;
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_LFGMatches, sizeof(ServerLFGMatchesRequest_Struct));
ServerLFGMatchesRequest_Struct* smrs = (ServerLFGMatchesRequest_Struct*)pack->pBuffer;
smrs->FromID = GetID();
smrs->QuerierLevel = GetLevel();
strcpy(smrs->FromName, GetName());
smrs->FromLevel = gmrs->FromLevel;
smrs->ToLevel = gmrs->ToLevel;
smrs->Classes = gmrs->Classes;
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
void Client::Handle_OP_LFGuild(const EQApplicationPacket *app)
{
if (app->size < 4)
return;
uint32 Command = *((uint32 *)app->pBuffer);
switch (Command)
{
case 0:
{
VERIFY_PACKET_LENGTH(OP_LFGuild, app, LFGuild_PlayerToggle_Struct);
LFGuild_PlayerToggle_Struct *pts = (LFGuild_PlayerToggle_Struct *)app->pBuffer;
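		// strnlen() is part of POSIX.1-2008; older Darwin SDKs (__DARWIN_C_LEVEL < 200809L)
		// don't provide it, hence the strlen() fallback below.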
#ifdef DARWIN
#if __DARWIN_C_LEVEL < 200809L
		if (strlen(pts->Comment) >= 256)
#else
		if (strnlen(pts->Comment, 256) >= 256) // strnlen caps at 256, so '> 256' could never trigger
#endif // __DARWIN_C_LEVEL
#else
		if (strnlen(pts->Comment, 256) >= 256) // strnlen caps at 256, so '> 256' could never trigger
#endif // DARWIN
return;
auto pack = new ServerPacket(ServerOP_QueryServGeneric, strlen(GetName()) + strlen(pts->Comment) + 38);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_UpdatePlayerInfo);
pack->WriteUInt32(GetBaseClass());
pack->WriteUInt32(GetLevel());
pack->WriteUInt32(GetSpentAA());
pack->WriteString(pts->Comment);
pack->WriteUInt32(pts->Toggle);
pack->WriteUInt32(pts->TimeZone);
worldserver.SendPacket(pack);
safe_delete(pack);
break;
}
case 1:
{
VERIFY_PACKET_LENGTH(OP_LFGuild, app, LFGuild_GuildToggle_Struct);
LFGuild_GuildToggle_Struct *gts = (LFGuild_GuildToggle_Struct *)app->pBuffer;
#ifdef DARWIN
#if __DARWIN_C_LEVEL < 200809L
		if (strlen(gts->Comment) >= 256)
#else
		if (strnlen(gts->Comment, 256) >= 256) // strnlen caps at 256, so '> 256' could never trigger
#endif // __DARWIN_C_LEVEL
#else
		if (strnlen(gts->Comment, 256) >= 256) // strnlen caps at 256, so '> 256' could never trigger
#endif // DARWIN
return;
auto pack =
new ServerPacket(ServerOP_QueryServGeneric, strlen(GetName()) + strlen(gts->Comment) +
strlen(guild_mgr.GetGuildName(GuildID())) + 43);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_UpdateGuildInfo);
pack->WriteString(guild_mgr.GetGuildName(GuildID()));
pack->WriteString(gts->Comment);
pack->WriteUInt32(gts->FromLevel);
pack->WriteUInt32(gts->ToLevel);
pack->WriteUInt32(gts->Classes);
pack->WriteUInt32(gts->AACount);
pack->WriteUInt32(gts->Toggle);
pack->WriteUInt32(gts->TimeZone);
worldserver.SendPacket(pack);
safe_delete(pack);
break;
}
case 3:
{
VERIFY_PACKET_LENGTH(OP_LFGuild, app, LFGuild_SearchPlayer_Struct);
auto pack = new ServerPacket(ServerOP_QueryServGeneric, strlen(GetName()) + 37);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_PlayerMatches);
LFGuild_SearchPlayer_Struct *sps = (LFGuild_SearchPlayer_Struct *)app->pBuffer;
pack->WriteUInt32(sps->FromLevel);
pack->WriteUInt32(sps->ToLevel);
pack->WriteUInt32(sps->MinAA);
pack->WriteUInt32(sps->TimeZone);
pack->WriteUInt32(sps->Classes);
worldserver.SendPacket(pack);
safe_delete(pack);
break;
}
case 4:
{
VERIFY_PACKET_LENGTH(OP_LFGuild, app, LFGuild_SearchGuild_Struct);
auto pack = new ServerPacket(ServerOP_QueryServGeneric, strlen(GetName()) + 33);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_GuildMatches);
LFGuild_SearchGuild_Struct *sgs = (LFGuild_SearchGuild_Struct *)app->pBuffer;
pack->WriteUInt32(sgs->Level);
pack->WriteUInt32(sgs->AAPoints);
pack->WriteUInt32(sgs->TimeZone);
pack->WriteUInt32(sgs->Class);
worldserver.SendPacket(pack);
safe_delete(pack);
break;
}
default:
break;
}
}
void Client::Handle_OP_LFPCommand(const EQApplicationPacket *app)
{
if (app->size != sizeof(LFP_Struct)) {
LogError("Wrong size: OP_LFPCommand, size=[{}], expected [{}]", app->size, sizeof(LFP_Struct));
DumpPacket(app);
return;
}
LFP_Struct *lfp = (LFP_Struct*)app->pBuffer;
LFP = lfp->Action != LFPOff;
database.SetLFP(CharacterID(), LFP);
if (!LFP) {
worldserver.StopLFP(CharacterID());
return;
}
GroupLFPMemberEntry LFPMembers[MAX_GROUP_MEMBERS];
for (unsigned int i = 0; i<MAX_GROUP_MEMBERS; i++) {
LFPMembers[i].Name[0] = '\0';
LFPMembers[i].Class = 0;
LFPMembers[i].Level = 0;
LFPMembers[i].Zone = 0;
LFPMembers[i].GuildID = 0xFFFF;
}
Group *g = GetGroup();
// Slot 0 is always for the group leader, or the player if not in a group
strcpy(LFPMembers[0].Name, GetName());
LFPMembers[0].Class = GetClass();
LFPMembers[0].Level = GetLevel();
LFPMembers[0].Zone = zone->GetZoneID();
LFPMembers[0].GuildID = GuildID();
if (g) {
// This should not happen. The client checks if you are in a group and will not let you put LFP on if
// you are not the leader.
if (!g->IsLeader(this)) {
LogError("Client sent LFP on for character [{}] who is grouped but not leader", GetName());
return;
}
// Fill the LFPMembers array with the rest of the group members, excluding ourself
// We don't fill in the class, level or zone, because we may not be able to determine
// them if the other group members are not in this zone. World will fill in this information
// for us, if it can.
int NextFreeSlot = 1;
for (unsigned int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if (strcasecmp(g->membername[i], LFPMembers[0].Name))
strcpy(LFPMembers[NextFreeSlot++].Name, g->membername[i]);
}
}
worldserver.UpdateLFP(CharacterID(), lfp->Action, lfp->MatchFilter, lfp->FromLevel, lfp->ToLevel, lfp->Classes,
lfp->Comments, LFPMembers);
}
void Client::Handle_OP_LFPGetMatchesRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(LFPGetMatchesRequest_Struct)) {
LogError("Wrong size: OP_LFPGetMatchesRequest, size=[{}], expected [{}]", app->size, sizeof(LFPGetMatchesRequest_Struct));
DumpPacket(app);
return;
}
LFPGetMatchesRequest_Struct* gmrs = (LFPGetMatchesRequest_Struct*)app->pBuffer;
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_LFPMatches, sizeof(ServerLFPMatchesRequest_Struct));
ServerLFPMatchesRequest_Struct* smrs = (ServerLFPMatchesRequest_Struct*)pack->pBuffer;
smrs->FromID = GetID();
smrs->FromLevel = gmrs->FromLevel;
smrs->ToLevel = gmrs->ToLevel;
smrs->QuerierLevel = GetLevel();
smrs->QuerierClass = GetClass();
strcpy(smrs->FromName, GetName());
worldserver.SendPacket(pack);
safe_delete(pack);
}
return;
}
void Client::Handle_OP_LoadSpellSet(const EQApplicationPacket *app)
{
if (app->size != sizeof(LoadSpellSet_Struct)) {
		LogError("Wrong size: OP_LoadSpellSet, size=[{}], expected [{}]", app->size, sizeof(LoadSpellSet_Struct));
return;
}
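	// 0xFFFFFFFF marks an empty gem in the spell set; presumably clearing each gem the set
	// populates lets the client follow up with memorize requests for the new spells.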
	LoadSpellSet_Struct* ss = (LoadSpellSet_Struct*)app->pBuffer;
	for (int i = 0; i < EQ::spells::SPELL_GEM_COUNT; i++) {
		if (ss->spell[i] != 0xFFFFFFFF)
			UnmemSpell(i, true);
	}
}
void Client::Handle_OP_Logout(const EQApplicationPacket *app)
{
LogDebug("[{}] sent a logout packet", GetName());
SendLogoutPackets();
auto outapp = new EQApplicationPacket(OP_LogoutReply);
FastQueuePacket(&outapp);
Disconnect();
return;
}
void Client::Handle_OP_LootItem(const EQApplicationPacket *app)
{
if (app->size != sizeof(LootingItem_Struct)) {
LogError("Wrong size: OP_LootItem, size=[{}], expected [{}]", app->size, sizeof(LootingItem_Struct));
return;
}
EQApplicationPacket* outapp = nullptr;
Entity* entity = entity_list.GetID(*((uint16*)app->pBuffer));
if (entity == 0) {
Message(Chat::Red, "Error: OP_LootItem: Corpse not found (ent = 0)");
outapp = new EQApplicationPacket(OP_LootComplete, 0);
QueuePacket(outapp);
safe_delete(outapp);
return;
}
if (entity->IsCorpse()) {
entity->CastToCorpse()->LootItem(this, app);
return;
}
else {
Message(Chat::Red, "Error: Corpse not found! (!ent->IsCorpse())");
Corpse::SendEndLootErrorPacket(this);
}
return;
}
void Client::Handle_OP_LootRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
		LogError("Wrong size: OP_LootRequest, size=[{}], expected [{}]", app->size, sizeof(uint32));
return;
}
Entity* ent = entity_list.GetID(*((uint32*)app->pBuffer));
if (ent == 0) {
Message(Chat::Red, "Error: OP_LootRequest: Corpse not found (ent = 0)");
Corpse::SendLootReqErrorPacket(this);
return;
}
if (ent->IsCorpse())
{
SetLooting(ent->GetID()); //store the entity we are looting
ent->CastToCorpse()->MakeLootRequestPackets(this, app);
return;
}
	else {
		LogError("OP_LootRequest: entity [{}] is not a corpse", ent->GetID());
		Message(Chat::Red, "Error: OP_LootRequest: Corpse not a corpse?");
		Corpse::SendLootReqErrorPacket(this);
	}
return;
}
void Client::Handle_OP_ManaChange(const EQApplicationPacket *app)
{
if (app->size == 0) {
		// an empty packet appears to be the signal to stop the songs
if (IsBardSong(casting_spell_id) || bardsong != 0)
InterruptSpell(SONG_ENDS, 0x121);
else
InterruptSpell(INTERRUPT_SPELL, 0x121);
return;
}
	else // the client doesn't appear to send proper mana changes
	{	 // with a length; just the 0-length ones for stopping songs
		//ManaChange_Struct* p = (ManaChange_Struct*)app->pBuffer;
		LogDebug("OP_ManaChange from client:");
DumpPacket(app);
}
return;
}
/*
#if 0 // I dont think there's an op for this now, and we check this
// when the client is sitting
void Client::Handle_OP_Medding(const EQApplicationPacket *app)
{
if (app->pBuffer[0])
medding = true;
else
medding = false;
return;
}
#endif
*/
void Client::Handle_OP_MemorizeSpell(const EQApplicationPacket *app)
{
cheat_manager.CheckMemTimer();
OPMemorizeSpell(app);
return;
}
void Client::Handle_OP_Mend(const EQApplicationPacket *app)
{
if (!HasSkill(EQ::skills::SkillMend))
return;
if (!p_timers.Expired(&database, pTimerMend, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerMend, MendReuseTime - 1);
int mendhp = GetMaxHP() / 4;
int currenthp = GetHP();
if (zone->random.Int(0, 199) < (int)GetSkill(EQ::skills::SkillMend)) {
int criticalchance = spellbonuses.CriticalMend + itembonuses.CriticalMend + aabonuses.CriticalMend;
if (zone->random.Int(0, 99) < criticalchance) {
mendhp *= 2;
MessageString(Chat::LightBlue, MEND_CRITICAL);
}
SetHP(GetHP() + mendhp);
SendHPUpdate();
MessageString(Chat::LightBlue, MEND_SUCCESS);
}
else {
/* the purpose of the following is to make the chance to worsen wounds much less common,
which is more consistent with the way eq live works.
according to my math, this should result in the following probability:
0 skill - 25% chance to worsen
20 skill - 23% chance to worsen
50 skill - 16% chance to worsen */
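	/* sanity check at 0 skill: within the failure branch, worsening requires
	   random.Int(0, 100) < 75 (~75/101) and random.Int(1, 3) == 1 (1/3),
	   i.e. roughly 0.743 * 0.333 = ~25%, matching the table above. */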
if ((GetSkill(EQ::skills::SkillMend) <= 75) && (zone->random.Int(GetSkill(EQ::skills::SkillMend), 100) < 75) && (zone->random.Int(1, 3) == 1))
{
SetHP(currenthp > mendhp ? (GetHP() - mendhp) : 1);
SendHPUpdate();
MessageString(Chat::LightBlue, MEND_WORSEN);
}
else
MessageString(Chat::LightBlue, MEND_FAIL);
}
CheckIncreaseSkill(EQ::skills::SkillMend, nullptr, 10);
return;
}
void Client::Handle_OP_MercenaryCommand(const EQApplicationPacket *app)
{
if (app->size != sizeof(MercenaryCommand_Struct))
{
Message(Chat::Red, "Size mismatch in OP_MercenaryCommand expected %i got %i", sizeof(MercenaryCommand_Struct), app->size);
LogDebug("Size mismatch in OP_MercenaryCommand expected [{}] got [{}]", sizeof(MercenaryCommand_Struct), app->size);
DumpPacket(app);
return;
}
MercenaryCommand_Struct* mc = (MercenaryCommand_Struct*)app->pBuffer;
uint32 merc_command = mc->MercCommand; // Seen 0 (zone in with no merc or suspended), 1 (dismiss merc), 5 (normal state), 20 (unknown), 36 (zone in with merc)
int32 option = mc->Option; // Seen -1 (zone in with no merc), 0 (setting to passive stance), 1 (normal or setting to balanced stance)
Log(Logs::General, Logs::Mercenaries, "Command %i, Option %i received from %s.", merc_command, option, GetName());
if (!RuleB(Mercs, AllowMercs))
return;
// Handle the Command here...
// Will need a list of what every type of command is supposed to do
// Unsure if there is a server response to this packet
if (option >= 0)
{
Merc* merc = GetMerc();
GetMercInfo().State = option;
if (merc)
{
			// number of available stances for the current merc
			uint8 numStances = static_cast<uint8>(zone->merc_stance_list[merc->GetMercTemplateID()].size());
MercTemplate* mercTemplate = zone->GetMercTemplate(GetMerc()->GetMercTemplateID());
if (mercTemplate)
{
//check to see if selected option is a valid stance slot (option is the slot the stance is in, not the actual stance)
if (option >= 0 && option < numStances)
{
merc->SetStance((EQ::constants::StanceType)mercTemplate->Stances[option]);
GetMercInfo().Stance = mercTemplate->Stances[option];
Log(Logs::General, Logs::Mercenaries, "Set Stance: %u for %s (%s)", merc->GetStance(), merc->GetName(), GetName());
}
}
}
}
}
void Client::Handle_OP_MercenaryDataRequest(const EQApplicationPacket *app)
{
	// The payload is 4 bytes: the EntityID of the Mercenary Liaison, which is of class 71.
if (app->size != sizeof(MercenaryMerchantShopRequest_Struct))
{
LogDebug("Size mismatch in OP_MercenaryDataRequest expected 4 got [{}]", app->size);
DumpPacket(app);
return;
}
MercenaryMerchantShopRequest_Struct* mmsr = (MercenaryMerchantShopRequest_Struct*)app->pBuffer;
uint32 merchant_id = mmsr->MercMerchantID;
	uint32 altCurrencyType = 19;
Log(Logs::General, Logs::Mercenaries, "Data Request for Merchant ID (%i) for %s.", merchant_id, GetName());
//client is requesting data about currently owned mercenary
if (merchant_id == 0) {
//send info about your current merc(s)
if (GetMercInfo().mercid)
{
Log(Logs::General, Logs::Mercenaries, "SendMercPersonalInfo Request for %s.", GetName());
SendMercPersonalInfo();
}
else
{
Log(Logs::General, Logs::Mercenaries, "SendMercPersonalInfo Not Sent - MercID (%i) for %s.", GetMercInfo().mercid, GetName());
}
}
if (!RuleB(Mercs, AllowMercs)) {
return;
}
NPC* tar = entity_list.GetNPCByID(merchant_id);
if (tar) {
int mercTypeCount = 0;
int mercCount = 0;
if (DistanceSquared(m_Position, tar->GetPosition()) > USE_NPC_RANGE2)
return;
if (tar->GetClass() != MERCERNARY_MASTER) {
return;
}
mercTypeCount = tar->GetNumMercTypes(static_cast<unsigned int>(ClientVersion()));
mercCount = tar->GetNumMercs(static_cast<unsigned int>(ClientVersion()));
if (mercCount > MAX_MERC)
return;
std::list<MercType> mercTypeList = tar->GetMercTypesList(static_cast<unsigned int>(ClientVersion()));
std::list<MercData> mercDataList = tar->GetMercsList(static_cast<unsigned int>(ClientVersion()));
int i = 0;
		int StanceCount = 0;
		for (auto mercListItr = mercDataList.begin(); mercListItr != mercDataList.end(); ++mercListItr) {
			StanceCount += static_cast<int>(zone->merc_stance_list[mercListItr->MercTemplateID].size());
		}
auto outapp = new EQApplicationPacket(OP_MercenaryDataResponse, sizeof(MercenaryMerchantList_Struct));
MercenaryMerchantList_Struct* mml = (MercenaryMerchantList_Struct*)outapp->pBuffer;
mml->MercTypeCount = mercTypeCount;
if (mercTypeCount > 0)
{
for (auto mercTypeListItr = mercTypeList.begin(); mercTypeListItr != mercTypeList.end();
++mercTypeListItr) {
mml->MercGrades[i] = mercTypeListItr->Type; // DBStringID for Type
i++;
}
}
mml->MercCount = mercCount;
if (mercCount > 0)
{
i = 0;
for (auto mercListIter = mercDataList.begin(); mercListIter != mercDataList.end();
++mercListIter) {
mml->Mercs[i].MercID = mercListIter->MercTemplateID;
mml->Mercs[i].MercType = mercListIter->MercType;
mml->Mercs[i].MercSubType = mercListIter->MercSubType;
mml->Mercs[i].PurchaseCost = RuleB(Mercs, ChargeMercPurchaseCost) ? Merc::CalcPurchaseCost(mercListIter->MercTemplateID, GetLevel(), 0) : 0;
mml->Mercs[i].UpkeepCost = RuleB(Mercs, ChargeMercUpkeepCost) ? Merc::CalcUpkeepCost(mercListIter->MercTemplateID, GetLevel(), 0) : 0;
mml->Mercs[i].Status = 0;
				mml->Mercs[i].AltCurrencyCost = RuleB(Mercs, ChargeMercPurchaseCost) ? Merc::CalcPurchaseCost(mercListIter->MercTemplateID, GetLevel(), altCurrencyType) : 0;
				mml->Mercs[i].AltCurrencyUpkeep = RuleB(Mercs, ChargeMercUpkeepCost) ? Merc::CalcUpkeepCost(mercListIter->MercTemplateID, GetLevel(), altCurrencyType) : 0;
				mml->Mercs[i].AltCurrencyType = altCurrencyType;
mml->Mercs[i].MercUnk01 = 0;
mml->Mercs[i].TimeLeft = -1;
mml->Mercs[i].MerchantSlot = i + 1;
mml->Mercs[i].MercUnk02 = 1;
				int mercStanceCount = static_cast<int>(zone->merc_stance_list[mercListIter->MercTemplateID].size());
mml->Mercs[i].StanceCount = mercStanceCount;
mml->Mercs[i].MercUnk03 = 519044964;
mml->Mercs[i].MercUnk04 = 1;
//mml->Mercs[i].MercName;
int stanceindex = 0;
if (mercStanceCount > 0)
{
auto iter2 = zone->merc_stance_list[mercListIter->MercTemplateID].begin();
while (iter2 != zone->merc_stance_list[mercListIter->MercTemplateID].end())
{
mml->Mercs[i].Stances[stanceindex].StanceIndex = stanceindex;
mml->Mercs[i].Stances[stanceindex].Stance = (iter2->StanceID);
stanceindex++;
++iter2;
}
}
i++;
}
}
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_MercenaryDataUpdateRequest(const EQApplicationPacket *app)
{
// The payload is 0 bytes.
if (app->size != 0)
{
Message(Chat::Red, "Size mismatch in OP_MercenaryDataUpdateRequest expected 0 got %i", app->size);
LogDebug("Size mismatch in OP_MercenaryDataUpdateRequest expected 0 got [{}]", app->size);
DumpPacket(app);
return;
}
Log(Logs::General, Logs::Mercenaries, "Data Update Request Received for %s.", GetName());
if (GetMercID())
{
SendMercPersonalInfo();
}
}
void Client::Handle_OP_MercenaryDismiss(const EQApplicationPacket *app)
{
// The payload is 0 or 1 bytes.
if (app->size > 1)
{
Message(Chat::Red, "Size mismatch in OP_MercenaryDismiss expected 0 got %i", app->size);
LogDebug("Size mismatch in OP_MercenaryDismiss expected 0 got [{}]", app->size);
DumpPacket(app);
return;
}
uint8 Command = 0;
if (app->size > 0)
{
char *InBuffer = (char *)app->pBuffer;
Command = VARSTRUCT_DECODE_TYPE(uint8, InBuffer);
}
Log(Logs::General, Logs::Mercenaries, "Dismiss Request ( %i ) Received for %s.", Command, GetName());
// Handle the dismiss here...
DismissMerc(GetMercInfo().mercid);
}
void Client::Handle_OP_MercenaryHire(const EQApplicationPacket *app)
{
// The payload is 16 bytes. First four bytes are the Merc ID (Template ID)
if (app->size != sizeof(MercenaryMerchantRequest_Struct))
{
LogDebug("Size mismatch in OP_MercenaryHire expected [{}] got [{}]", sizeof(MercenaryMerchantRequest_Struct), app->size);
DumpPacket(app);
return;
}
MercenaryMerchantRequest_Struct* mmrq = (MercenaryMerchantRequest_Struct*)app->pBuffer;
uint32 merc_template_id = mmrq->MercID;
uint32 merchant_id = mmrq->MercMerchantID;
uint32 merc_unk1 = mmrq->MercUnk01;
uint32 merc_unk2 = mmrq->MercUnk02;
Log(Logs::General, Logs::Mercenaries, "Template ID (%i), Merchant ID (%i), Unknown1 (%i), Unknown2 (%i), Client: %s", merc_template_id, merchant_id, merc_unk1, merc_unk2, GetName());
//HirePending = true;
SetHoTT(0);
SendTargetCommand(0);
if (!RuleB(Mercs, AllowMercs))
return;
MercTemplate* merc_template = zone->GetMercTemplate(merc_template_id);
if (merc_template)
{
Mob* merchant = entity_list.GetNPCByID(merchant_id);
if (!CheckCanHireMerc(merchant, merc_template_id))
{
return;
}
// Set time remaining to max on Hire
GetMercInfo().MercTimerRemaining = RuleI(Mercs, UpkeepIntervalMS);
// Get merc, assign it to client & spawn
Merc* merc = Merc::LoadMerc(this, merc_template, merchant_id, false);
if (merc)
{
SpawnMerc(merc, true);
merc->Save();
if (RuleB(Mercs, ChargeMercPurchaseCost))
{
uint32 cost = Merc::CalcPurchaseCost(merc_template->MercTemplateID, GetLevel()) * 100; // Cost is in gold
TakeMoneyFromPP(cost, true);
}
// approved hire request
SendMercMerchantResponsePacket(0);
}
else
{
//merc failed to spawn
SendMercMerchantResponsePacket(3);
}
}
else
{
//merc doesn't exist in db
SendMercMerchantResponsePacket(2);
}
}
void Client::Handle_OP_MercenarySuspendRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(SuspendMercenary_Struct))
{
Message(Chat::Red, "Size mismatch in OP_MercenarySuspendRequest expected %i got %i", sizeof(SuspendMercenary_Struct), app->size);
LogDebug("Size mismatch in OP_MercenarySuspendRequest expected [{}] got [{}]", sizeof(SuspendMercenary_Struct), app->size);
DumpPacket(app);
return;
}
SuspendMercenary_Struct* sm = (SuspendMercenary_Struct*)app->pBuffer;
uint32 merc_suspend = sm->SuspendMerc; // Seen 30 for suspending or unsuspending
Log(Logs::General, Logs::Mercenaries, "Suspend ( %i ) received for %s.", merc_suspend, GetName());
if (!RuleB(Mercs, AllowMercs))
return;
// Check if the merc is suspended and if so, unsuspend, otherwise suspend it
SuspendMercCommand();
}
void Client::Handle_OP_MercenaryTimerRequest(const EQApplicationPacket *app)
{
	// The payload is 0 or 1 bytes.
	if (app->size > 1)
	{
		Message(Chat::Red, "Size mismatch in OP_MercenaryTimerRequest expected 0 or 1 got %i", app->size);
		LogDebug("Size mismatch in OP_MercenaryTimerRequest expected 0 or 1 got [{}]", app->size);
DumpPacket(app);
return;
}
Log(Logs::General, Logs::Mercenaries, "Timer Request received for %s.", GetName());
if (!RuleB(Mercs, AllowMercs)) {
return;
}
// To Do: Load Mercenary Timer Data to properly populate this reply packet
// All hard set values for now
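	// mercState: 5 appears to mean active/normal; 1 is used below for a suspended merc.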
uint32 entityID = 0;
uint32 mercState = 5;
uint32 suspendedTime = 0;
if (GetMercID()) {
Merc* merc = GetMerc();
if (merc) {
entityID = merc->GetID();
if (GetMercInfo().IsSuspended) {
mercState = 1;
suspendedTime = GetMercInfo().SuspendedTime;
}
}
}
if (entityID > 0) {
SendMercTimerPacket(entityID, mercState, suspendedTime, GetMercInfo().MercTimerRemaining, RuleI(Mercs, SuspendIntervalMS));
}
}
void Client::Handle_OP_MoveCoin(const EQApplicationPacket *app)
{
if (app->size != sizeof(MoveCoin_Struct)) {
LogError("Wrong size on OP_MoveCoin. Got: [{}], Expected: [{}]", app->size, sizeof(MoveCoin_Struct));
DumpPacket(app);
return;
}
OPMoveCoin(app);
return;
}
void Client::Handle_OP_MoveItem(const EQApplicationPacket *app)
{
if (!CharacterID())
{
return;
}
if (app->size != sizeof(MoveItem_Struct)) {
LogError("Wrong size: OP_MoveItem, size=[{}], expected [{}]", app->size, sizeof(MoveItem_Struct));
return;
}
MoveItem_Struct* mi = (MoveItem_Struct*)app->pBuffer;
if (spellend_timer.Enabled() && casting_spell_id && !IsBardSong(casting_spell_id))
{
if (mi->from_slot != mi->to_slot && (mi->from_slot <= EQ::invslot::GENERAL_END || mi->from_slot > 39) && IsValidSlot(mi->from_slot) && IsValidSlot(mi->to_slot))
{
const EQ::ItemInstance *itm_from = GetInv().GetItem(mi->from_slot);
const EQ::ItemInstance *itm_to = GetInv().GetItem(mi->to_slot);
auto detect = fmt::format("Player issued a move item from {}(item id {}) to {}(item id {}) while casting {}.",
mi->from_slot,
itm_from ? itm_from->GetID() : 0,
mi->to_slot,
itm_to ? itm_to->GetID() : 0,
casting_spell_id);
database.SetMQDetectionFlag(AccountName(), GetName(), detect, zone->GetShortName());
Kick("Inventory desync"); // Kick client to prevent client and server from getting out-of-sync inventory slots
return;
}
}
// Illegal bagslot usage checks. Currently, user only receives a message if this check is triggered.
bool mi_hack = false;
if (mi->from_slot >= EQ::invbag::GENERAL_BAGS_BEGIN && mi->from_slot <= EQ::invbag::CURSOR_BAG_END) {
if (mi->from_slot >= EQ::invbag::CURSOR_BAG_BEGIN) { mi_hack = true; }
else {
int16 from_parent = m_inv.CalcSlotId(mi->from_slot);
if (!m_inv[from_parent]) { mi_hack = true; }
else if (!m_inv[from_parent]->IsClassBag()) { mi_hack = true; }
else if (m_inv.CalcBagIdx(mi->from_slot) >= m_inv[from_parent]->GetItem()->BagSlots) { mi_hack = true; }
}
}
if (mi->to_slot >= EQ::invbag::GENERAL_BAGS_BEGIN && mi->to_slot <= EQ::invbag::CURSOR_BAG_END) {
if (mi->to_slot >= EQ::invbag::CURSOR_BAG_BEGIN) { mi_hack = true; }
else {
int16 to_parent = m_inv.CalcSlotId(mi->to_slot);
if (!m_inv[to_parent]) { mi_hack = true; }
else if (!m_inv[to_parent]->IsClassBag()) { mi_hack = true; }
else if (m_inv.CalcBagIdx(mi->to_slot) >= m_inv[to_parent]->GetItem()->BagSlots) { mi_hack = true; }
}
}
if (mi_hack) { Message(Chat::Yellow, "Caution: Illegal use of inaccessible bag slots!"); }
if (!SwapItem(mi) && IsValidSlot(mi->from_slot) && IsValidSlot(mi->to_slot)) {
SwapItemResync(mi);
bool error = false;
InterrogateInventory(this, false, true, false, error, false);
if (error)
InterrogateInventory(this, true, false, true, error);
}
return;
}
void Client::Handle_OP_MoveMultipleItems(const EQApplicationPacket *app)
{
Kick("Unimplemented move multiple items"); // TODO: lets not desync though
}
void Client::Handle_OP_OpenContainer(const EQApplicationPacket *app)
{
// Does not exist in Ti client
// SoF, SoD and UF clients send a 4-byte packet indicating the 'parent' slot
// SoF, SoD and UF slots are defined by a uint32 value and currently untranslated
// RoF client sends a 12-byte packet based on the RoF::Structs::ItemSlotStruct
// RoF structure types are defined as signed uint16 and currently untranslated
// RoF::struct.SlotType = {0 - Equipment, 1 - Bank, 2 - Shared Bank} // not tested beyond listed types
// RoF::struct.Unknown2 = 0
// RoF::struct.MainSlot = { <parent slot range designated by slottype..zero-based> }
// RoF::struct.SubSlot = -1 (non-child)
// RoF::struct.AugSlot = -1 (non-child)
// RoF::struct.Unknown1 = 141 (unsure why, but always appears to be this value..combine containers not tested)
// SideNote: Watching the slot translations, Unknown1 is showing '141' as well on certain item swaps.
// Manually looting a corpse results in a from '34' to '68' value for equipment items, '0' to '0' for inventory.
}
void Client::Handle_OP_OpenGuildTributeMaster(const EQApplicationPacket *app)
{
LogTribute("Received OP_OpenGuildTributeMaster of length [{}]", app->size);
if (app->size != sizeof(StartTribute_Struct))
printf("Error in OP_OpenGuildTributeMaster. Expected size of: %zu, but got: %i\n", sizeof(StartTribute_Struct), app->size);
else {
//Opens the guild tribute master window
StartTribute_Struct* st = (StartTribute_Struct*)app->pBuffer;
Mob* tribmast = entity_list.GetMob(st->tribute_master_id);
if (tribmast && tribmast->IsNPC() && tribmast->GetClass() == GUILD_TRIBUTE_MASTER
&& DistanceSquared(m_Position, tribmast->GetPosition()) <= USE_NPC_RANGE2) {
st->response = 1;
QueuePacket(app);
tribute_master_id = st->tribute_master_id;
DoTributeUpdate();
}
else {
st->response = 0;
QueuePacket(app);
}
}
return;
}
void Client::Handle_OP_OpenInventory(const EQApplicationPacket *app)
{
// Does not exist in Ti, UF or RoF clients
// SoF and SoD both send a 4-byte packet with a uint32 value of '8'
}
void Client::Handle_OP_OpenTributeMaster(const EQApplicationPacket *app)
{
LogTribute("Received OP_OpenTributeMaster of length [{}]", app->size);
if (app->size != sizeof(StartTribute_Struct))
printf("Error in OP_OpenTributeMaster. Expected size of: %zu, but got: %i\n", sizeof(StartTribute_Struct), app->size);
else {
//Opens the tribute master window
StartTribute_Struct* st = (StartTribute_Struct*)app->pBuffer;
Mob* tribmast = entity_list.GetMob(st->tribute_master_id);
if (tribmast && tribmast->IsNPC() && tribmast->GetClass() == TRIBUTE_MASTER
&& DistanceSquared(m_Position, tribmast->GetPosition()) <= USE_NPC_RANGE2) {
st->response = 1;
QueuePacket(app);
tribute_master_id = st->tribute_master_id;
DoTributeUpdate();
}
else {
st->response = 0;
QueuePacket(app);
}
}
return;
}
void Client::Handle_OP_PDeletePetition(const EQApplicationPacket *app)
{
if (app->size < 2) {
LogError("Wrong size: OP_PDeletePetition, size=[{}], expected [{}]", app->size, 2);
return;
}
if (petition_list.DeletePetitionByCharName((char*)app->pBuffer))
MessageString(Chat::White, PETITION_DELETED);
else
MessageString(Chat::White, PETITION_NO_DELETE);
return;
}
void Client::Handle_OP_PetCommands(const EQApplicationPacket *app)
{
if (app->size != sizeof(PetCommand_Struct)) {
LogError("Wrong size: OP_PetCommands, size=[{}], expected [{}]", app->size, sizeof(PetCommand_Struct));
return;
}
char val1[20] = { 0 };
PetCommand_Struct* pet = (PetCommand_Struct*)app->pBuffer;
Mob* mypet = this->GetPet();
Mob *target = entity_list.GetMob(pet->target);
if (!mypet || pet->command == PET_LEADER) {
if (pet->command == PET_LEADER) {
// we either send the ID of an NPC we're interested in or no ID for our own pet
if (target) {
auto owner = target->GetOwner();
if (owner)
target->SayString(PET_LEADERIS, owner->GetCleanName());
else
target->SayString(I_FOLLOW_NOONE);
} else if (mypet) {
mypet->SayString(PET_LEADERIS, GetName());
}
}
return;
}
if (mypet->GetPetType() == petTargetLock && (pet->command != PET_HEALTHREPORT && pet->command != PET_GETLOST))
return;
// just let the command "/pet get lost" work for familiars
if (mypet->GetPetType() == petFamiliar && pet->command != PET_GETLOST)
return;
uint32 PetCommand = pet->command;
// Handle Sit/Stand toggle in UF and later.
/*
if (GetClientVersion() >= EQClientUnderfoot)
{
if (PetCommand == PET_SITDOWN)
if (mypet->GetPetOrder() == SPO_Sit)
PetCommand = PET_STANDUP;
}
*/
switch (PetCommand)
{
case PET_ATTACK: {
if (!target)
break;
if (target->IsMezzed()) {
MessageString(Chat::NPCQuestSay, CANNOT_WAKE, mypet->GetCleanName(), target->GetCleanName());
break;
}
if (mypet->IsFeared())
break; //prevent pet from attacking stuff while feared
if (!mypet->IsAttackAllowed(target)) {
mypet->SayString(this, NOT_LEGAL_TARGET);
break;
}
// default range is 200, takes Z into account
		// really they do something weird where they're added to the aggro list, then removed,
		// and will attack if they come in range -- for now, just close off the exploit
if (DistanceSquared(mypet->GetPosition(), target->GetPosition()) >= RuleR(Aggro, PetAttackRange)) {
// they say they're attacking then remove on live ... so they don't really say anything in this case ...
break;
}
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (target != this && DistanceSquaredNoZ(mypet->GetPosition(), target->GetPosition()) <= (RuleR(Pets, AttackCommandRange)*RuleR(Pets, AttackCommandRange))) {
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
}
// fix GUI sit button to be unpressed and stop sitting regen
SetPetCommandState(PET_BUTTON_SIT, 0);
mypet->SetAppearance(eaStanding);
zone->AddAggroMob();
// classic acts like qattack
int hate = 1;
if (mypet->IsEngaged()) {
auto top = mypet->GetHateMost();
if (top && top != target)
hate += mypet->GetHateAmount(top) - mypet->GetHateAmount(target) + 100; // should be enough to cause target change
}
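					// e.g. with the current top at 1000 hate and the target at 200:
					// hate = 1 + (1000 - 200) + 100 = 901, so the target lands at 1101 and overtakes the top.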
mypet->AddToHateList(target, hate, 0, true, false, false, SPELL_UNKNOWN, true);
MessageString(Chat::PetResponse, PET_ATTACKING, mypet->GetCleanName(), target->GetCleanName());
SetTarget(target);
}
}
break;
}
case PET_QATTACK: {
if (mypet->IsFeared())
break; //prevent pet from attacking stuff while feared
if (!GetTarget())
break;
if (GetTarget()->IsMezzed()) {
MessageString(Chat::NPCQuestSay, CANNOT_WAKE, mypet->GetCleanName(), GetTarget()->GetCleanName());
break;
}
if (!mypet->IsAttackAllowed(GetTarget())) {
mypet->SayString(this, NOT_LEGAL_TARGET);
break;
}
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (GetTarget() != this && DistanceSquaredNoZ(mypet->GetPosition(), GetTarget()->GetPosition()) <= (RuleR(Pets, AttackCommandRange)*RuleR(Pets, AttackCommandRange))) {
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
}
// fix GUI sit button to be unpressed and stop sitting regen
SetPetCommandState(PET_BUTTON_SIT, 0);
mypet->SetAppearance(eaStanding);
zone->AddAggroMob();
mypet->AddToHateList(GetTarget(), 1, 0, true, false, false, SPELL_UNKNOWN, true);
MessageString(Chat::PetResponse, PET_ATTACKING, mypet->GetCleanName(), GetTarget()->GetCleanName());
}
}
break;
}
case PET_BACKOFF: {
if (mypet->IsFeared()) break; //keeps pet running while feared
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_CALMING);
mypet->WipeHateList();
mypet->SetTarget(nullptr);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
break;
}
case PET_HEALTHREPORT: {
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
MessageString(Chat::PetResponse, PET_REPORT_HP, ConvertArrayF(mypet->GetHPRatio(), val1));
mypet->ShowBuffList(this);
}
break;
}
case PET_GETLOST: {
if (mypet->Charmed())
break;
if (mypet->GetPetType() == petCharmed || !mypet->IsNPC()) {
// eqlive ignores this command
// we could just remove the charm
// and continue
mypet->BuffFadeByEffect(SE_Charm);
break;
}
else {
SetPet(nullptr);
}
mypet->SayString(this, Chat::PetResponse, PET_GETLOST_STRING);
mypet->CastToNPC()->Depop();
//Oddly, the client (Titanium) will still allow "/pet get lost" command despite me adding the code below. If someone can figure that out, you can uncomment this code and use it.
/*
if((mypet->GetPetType() == petAnimation && GetAA(aaAnimationEmpathy) >= 2) || mypet->GetPetType() != petAnimation) {
mypet->SayString(PET_GETLOST_STRING);
mypet->CastToNPC()->Depop();
}
*/
break;
}
case PET_GUARDHERE: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (mypet->IsNPC()) {
// Set Sit button to unpressed - send stand anim/end hpregen
SetPetCommandState(PET_BUTTON_SIT, 0);
mypet->SendAppearancePacket(AT_Anim, ANIM_STAND);
mypet->SayString(this, Chat::PetResponse, PET_GUARDINGLIFE);
mypet->SetPetOrder(SPO_Guard);
mypet->CastToNPC()->SaveGuardSpot(mypet->GetPosition());
if (!mypet->GetTarget()) // want them to not twitch if they're chasing something down
mypet->StopNavigation();
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
}
break;
}
case PET_FOLLOWME: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_FOLLOWING);
mypet->SetPetOrder(SPO_Follow);
// fix GUI sit button to be unpressed - send stand anim/end hpregen
SetPetCommandState(PET_BUTTON_SIT, 0);
mypet->SendAppearancePacket(AT_Anim, ANIM_STAND);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
break;
}
case PET_TAUNT: {
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (mypet->CastToNPC()->IsTaunting())
{
MessageString(Chat::PetResponse, PET_NO_TAUNT);
mypet->CastToNPC()->SetTaunting(false);
}
else
{
MessageString(Chat::PetResponse, PET_DO_TAUNT);
mypet->CastToNPC()->SetTaunting(true);
}
}
break;
}
case PET_TAUNT_ON: {
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
MessageString(Chat::PetResponse, PET_DO_TAUNT);
mypet->CastToNPC()->SetTaunting(true);
}
break;
}
case PET_TAUNT_OFF: {
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
MessageString(Chat::PetResponse, PET_NO_TAUNT);
mypet->CastToNPC()->SetTaunting(false);
}
break;
}
case PET_GUARDME: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_GUARDME_STRING);
mypet->SetPetOrder(SPO_Follow);
// Set Sit button to unpressed - send stand anim/end hpregen
SetPetCommandState(PET_BUTTON_SIT, 0);
mypet->SendAppearancePacket(AT_Anim, ANIM_STAND);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
break;
}
case PET_SIT: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (mypet->GetPetOrder() == SPO_Sit)
{
mypet->SayString(this, Chat::PetResponse, PET_SIT_STRING);
mypet->SetPetOrder(SPO_Follow);
mypet->SendAppearancePacket(AT_Anim, ANIM_STAND);
}
else
{
mypet->SayString(this, Chat::PetResponse, PET_SIT_STRING);
mypet->SetPetOrder(SPO_Sit);
mypet->SetRunAnimSpeed(0);
if (!mypet->UseBardSpellLogic()) //maybe we can have a bard pet someday
mypet->InterruptSpell(); // sitting interrupts any cast in progress
mypet->SendAppearancePacket(AT_Anim, ANIM_SIT);
}
}
break;
}
case PET_STANDUP: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_SIT_STRING);
SetPetCommandState(PET_BUTTON_SIT, 0);
mypet->SetPetOrder(SPO_Follow);
mypet->SendAppearancePacket(AT_Anim, ANIM_STAND);
}
break;
}
case PET_SITDOWN: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_SIT_STRING);
SetPetCommandState(PET_BUTTON_SIT, 1);
mypet->SetPetOrder(SPO_Sit);
mypet->SetRunAnimSpeed(0);
if (!mypet->UseBardSpellLogic()) //maybe we can have a bard pet someday
mypet->InterruptSpell(); // sitting interrupts any cast in progress
mypet->SendAppearancePacket(AT_Anim, ANIM_SIT);
}
break;
}
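// PET_HOLD and PET_GHOLD track separate flags; setting one clears the other
// flag and releases the other button, so the two states stay mutually exclusive.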
case PET_HOLD: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsHeld())
{
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_HOLD_SET_OFF);
mypet->SetHeld(false);
}
else
{
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_HOLD_SET_ON);
if (m_ClientVersionBit & EQ::versions::maskUFAndLater)
mypet->SayString(this, Chat::PetResponse, PET_NOW_HOLDING);
else
mypet->SayString(this, Chat::PetResponse, PET_ON_HOLD);
mypet->SetHeld(true);
}
mypet->SetGHeld(false);
SetPetCommandState(PET_BUTTON_GHOLD, 0);
}
break;
}
case PET_HOLD_ON: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC() && !mypet->IsHeld()) {
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_HOLD_SET_ON);
if (m_ClientVersionBit & EQ::versions::maskUFAndLater)
mypet->SayString(this, Chat::PetResponse, PET_NOW_HOLDING);
else
mypet->SayString(this, Chat::PetResponse, PET_ON_HOLD);
mypet->SetHeld(true);
mypet->SetGHeld(false);
SetPetCommandState(PET_BUTTON_GHOLD, 0);
}
break;
}
case PET_HOLD_OFF: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC() && mypet->IsHeld()) {
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_HOLD_SET_OFF);
mypet->SetHeld(false);
}
break;
}
case PET_GHOLD: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsGHeld())
{
if (m_ClientVersionBit & EQ::versions::maskUFAndLater)
MessageString(Chat::PetResponse, PET_OFF_GHOLD);
mypet->SetGHeld(false);
}
else
{
if (m_ClientVersionBit & EQ::versions::maskUFAndLater) {
MessageString(Chat::PetResponse, PET_ON_GHOLD);
mypet->SayString(this, Chat::PetResponse, PET_GHOLD_ON_MSG);
} else {
mypet->SayString(this, Chat::PetResponse, PET_ON_HOLD);
}
mypet->SetGHeld(true);
}
mypet->SetHeld(false);
SetPetCommandState(PET_BUTTON_HOLD, 0);
}
break;
}
case PET_GHOLD_ON: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (m_ClientVersionBit & EQ::versions::maskUFAndLater) {
MessageString(Chat::PetResponse, PET_ON_GHOLD);
mypet->SayString(this, Chat::PetResponse, PET_GHOLD_ON_MSG);
} else {
mypet->SayString(this, Chat::PetResponse, PET_ON_HOLD);
}
mypet->SetGHeld(true);
mypet->SetHeld(false);
SetPetCommandState(PET_BUTTON_HOLD, 0);
}
break;
}
case PET_GHOLD_OFF: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC() && mypet->IsGHeld()) {
if (m_ClientVersionBit & EQ::versions::maskUFAndLater)
MessageString(Chat::PetResponse, PET_OFF_GHOLD);
mypet->SetGHeld(false);
}
break;
}
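// PET_SPELLHOLD toggles the pet's NoCast flag: while set, the pet melees but does not cast.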
case PET_SPELLHOLD: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (mypet->IsNoCast()) {
MessageString(Chat::PetResponse, PET_CASTING);
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_SPELLHOLD_SET_OFF);
mypet->SetNoCast(false);
}
else {
MessageString(Chat::PetResponse, PET_NOT_CASTING);
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_SPELLHOLD_SET_ON);
mypet->SetNoCast(true);
}
}
break;
}
case PET_SPELLHOLD_ON: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (!mypet->IsNoCast()) {
MessageString(Chat::PetResponse, PET_NOT_CASTING);
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_SPELLHOLD_SET_ON);
mypet->SetNoCast(true);
}
}
break;
}
case PET_SPELLHOLD_OFF: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (mypet->IsNoCast()) {
MessageString(Chat::PetResponse, PET_CASTING);
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_SPELLHOLD_SET_OFF);
mypet->SetNoCast(false);
}
}
break;
}
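// PET_FOCUS toggles the pet's Focused flag (consumed elsewhere by the pet AI,
// presumably to keep the pet locked on its current target).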
case PET_FOCUS: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (mypet->IsFocused()) {
MessageString(Chat::PetResponse, PET_NOT_FOCUSING);
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_FOCUS_SET_OFF);
mypet->SetFocused(false);
}
else {
MessageString(Chat::PetResponse, PET_NOW_FOCUSING);
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_FOCUS_SET_ON);
mypet->SetFocused(true);
}
}
break;
}
case PET_FOCUS_ON: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (!mypet->IsFocused()) {
MessageString(Chat::PetResponse, PET_NOW_FOCUSING);
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_FOCUS_SET_ON);
mypet->SetFocused(true);
}
}
break;
}
case PET_FOCUS_OFF: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (mypet->IsFocused()) {
MessageString(Chat::PetResponse, PET_NOT_FOCUSING);
if (m_ClientVersionBit & EQ::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_FOCUS_SET_OFF);
mypet->SetFocused(false);
}
}
break;
}
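// PET_STOP halts the pet in place and drops its target; stop and regroup are
// mutually exclusive, so enabling one clears the other state and button.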
case PET_STOP: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
} else {
mypet->SetPetStop(true);
mypet->StopNavigation();
mypet->SetTarget(nullptr);
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
}
}
mypet->SayString(this, Chat::PetResponse, PET_GETLOST_STRING);
}
break;
}
case PET_STOP_ON: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SetPetStop(true);
mypet->StopNavigation();
mypet->SetTarget(nullptr);
mypet->SayString(this, Chat::PetResponse, PET_GETLOST_STRING);
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
}
}
break;
}
case PET_STOP_OFF: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SetPetStop(false);
mypet->SayString(this, Chat::PetResponse, PET_GETLOST_STRING);
}
break;
}
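// PET_REGROUP makes the pet break off combat (the regroup behavior itself lives
// in the pet AI); like stop, it clears the opposing state and button.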
case PET_REGROUP: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if (aabonuses.PetCommands[PetCommand]) {
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
mypet->SayString(this, Chat::PetResponse, PET_OFF_REGROUPING);
} else {
mypet->SetPetRegroup(true);
mypet->SetTarget(nullptr);
mypet->SayString(this, Chat::PetResponse, PET_ON_REGROUPING);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
}
break;
}
case PET_REGROUP_ON: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if (aabonuses.PetCommands[PetCommand]) {
mypet->SetPetRegroup(true);
mypet->SetTarget(nullptr);
mypet->SayString(this, Chat::PetResponse, PET_ON_REGROUPING);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
break;
}
case PET_REGROUP_OFF: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if (aabonuses.PetCommands[PetCommand]) {
mypet->SetPetRegroup(false);
mypet->SayString(this, Chat::PetResponse, PET_OFF_REGROUPING);
}
break;
}
default:
printf("Client attempted to use a unknown pet command:\n");
break;
}
}
void Client::Handle_OP_Petition(const EQApplicationPacket *app)
{
if (app->size <= 1)
return;
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else
{
if (petition_list.FindPetitionByAccountName(AccountName()))
{
Message(0, "You already have a petition in the queue, you must wait for it to be answered or use /deletepetition to delete it.");
return;
}
auto pet = new Petition(CharacterID());
pet->SetAName(this->AccountName());
pet->SetClass(this->GetClass());
pet->SetLevel(this->GetLevel());
pet->SetCName(this->GetName());
pet->SetRace(this->GetRace());
pet->SetLastGM("");
pet->SetPetitionText((char*)app->pBuffer);
pet->SetZone(zone->GetZoneID());
pet->SetUrgency(0);
petition_list.AddPetition(pet);
database.InsertPetitionToDB(pet);
petition_list.UpdateGMQueue();
petition_list.UpdateZoneListQueue();
worldserver.SendEmoteMessage(0, 0, 80, 15, "%s has made a petition. #%i", GetName(), pet->GetID());
}
return;
}
void Client::Handle_OP_PetitionBug(const EQApplicationPacket *app)
{
Message(0, "Petition Bugs are not supported, please use /bug.");
return;
}
void Client::Handle_OP_PetitionCheckIn(const EQApplicationPacket *app)
{
if (app->size != sizeof(Petition_Struct)) {
LogError("Wrong size: OP_PetitionCheckIn, size=[{}], expected [{}]", app->size, sizeof(Petition_Struct));
return;
}
Petition_Struct* inpet = (Petition_Struct*)app->pBuffer;
Petition* pet = petition_list.GetPetitionByID(inpet->petnumber);
if (!pet) {
LogError("OP_PetitionCheckIn requested unknown petition [{}]", inpet->petnumber);
return;
}
pet->SetUrgency(inpet->urgency);
pet->SetLastGM(this->GetName());
pet->SetGMText(inpet->gmtext);
pet->SetCheckedOut(false);
petition_list.UpdatePetition(pet);
petition_list.UpdateGMQueue();
petition_list.UpdateZoneListQueue();
return;
}
void Client::Handle_OP_PetitionCheckout(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
std::cout << "Wrong size: OP_PetitionCheckout, size=" << app->size << ", expected " << sizeof(uint32) << std::endl;
return;
}
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
uint32 getpetnum = *((uint32*)app->pBuffer);
Petition* getpet = petition_list.GetPetitionByID(getpetnum);
if (getpet != 0) {
getpet->AddCheckout();
getpet->SetCheckedOut(true);
getpet->SendPetitionToPlayer(this->CastToClient());
petition_list.UpdatePetition(getpet);
petition_list.UpdateGMQueue();
petition_list.UpdateZoneListQueue();
}
}
return;
}
void Client::Handle_OP_PetitionDelete(const EQApplicationPacket *app)
{
if (app->size != sizeof(PetitionUpdate_Struct)) {
LogError("Wrong size: OP_PetitionDelete, size=[{}], expected [{}]", app->size, sizeof(PetitionUpdate_Struct));
return;
}
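// echo an update back to the client marking the petition as gone; a status of
// 0xFFFFFFFF appears to be what flags the entry as deleted client-side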
auto outapp = new EQApplicationPacket(OP_PetitionUpdate, sizeof(PetitionUpdate_Struct));
PetitionUpdate_Struct* pet = (PetitionUpdate_Struct*)outapp->pBuffer;
pet->petnumber = *((int*)app->pBuffer);
pet->color = 0x00;
pet->status = 0xFFFFFFFF;
pet->senttime = 0;
strcpy(pet->accountid, "");
strcpy(pet->gmsenttoo, "");
pet->quetotal = petition_list.GetTotalPetitions();
strcpy(pet->charname, "");
FastQueuePacket(&outapp);
if (petition_list.DeletePetition(pet->petnumber) == -1)
LogError("Failed to delete petition [{}]", pet->petnumber);
petition_list.ClearPetitions();
petition_list.UpdateGMQueue();
petition_list.ReadDatabase();
petition_list.UpdateZoneListQueue();
return;
}
void Client::Handle_OP_PetitionQue(const EQApplicationPacket *app)
{
#ifdef _EQDEBUG
printf("%s looking at petitions..\n", this->GetName());
#endif
return;
}
void Client::Handle_OP_PetitionRefresh(const EQApplicationPacket *app)
{
// This is sent when the client asks for the petition list again and again...
// We ignore it here because replying would flood the zones and cause lag
// if it actually did something. :P We update on our own schedule now.
return;
}
void Client::Handle_OP_PetitionResolve(const EQApplicationPacket *app)
{
Handle_OP_PetitionDelete(app);
}
void Client::Handle_OP_PetitionUnCheckout(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
std::cout << "Wrong size: OP_PetitionUnCheckout, size=" << app->size << ", expected " << sizeof(uint32) << std::endl;
return;
}
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
uint32 getpetnum = *((uint32*)app->pBuffer);
Petition* getpet = petition_list.GetPetitionByID(getpetnum);
if (getpet != 0) {
getpet->SetCheckedOut(false);
petition_list.UpdatePetition(getpet);
petition_list.UpdateGMQueue();
petition_list.UpdateZoneListQueue();
}
}
return;
}
void Client::Handle_OP_PlayerStateAdd(const EQApplicationPacket *app)
{
if (app->size != sizeof(PlayerState_Struct)) {
std::cout << "Wrong size: OP_PlayerStateAdd, size=" << app->size << ", expected " << sizeof(PlayerState_Struct) << std::endl;
return;
}
PlayerState_Struct *ps = (PlayerState_Struct *)app->pBuffer;
AddPlayerState(ps->state);
entity_list.QueueClients(this, app, true);
}
void Client::Handle_OP_PlayerStateRemove(const EQApplicationPacket *app)
{
if (app->size != sizeof(PlayerState_Struct)) {
std::cout << "Wrong size: OP_PlayerStateRemove, size=" << app->size << ", expected " << sizeof(PlayerState_Struct) << std::endl;
return;
}
PlayerState_Struct *ps = (PlayerState_Struct *)app->pBuffer;
RemovePlayerState(ps->state);
entity_list.QueueClients(this, app, true);
}
void Client::Handle_OP_PickPocket(const EQApplicationPacket *app)
{
if (app->size != sizeof(PickPocket_Struct))
{
LogError("Size mismatch for Pick Pocket packet");
DumpPacket(app);
return;
}
if (!HasSkill(EQ::skills::SkillPickPockets))
{
return;
}
if (!p_timers.Expired(&database, pTimerBeggingPickPocket, false))
{
Message(Chat::Red, "Ability recovery time not yet met.");
database.SetMQDetectionFlag(this->AccountName(), this->GetName(), "OP_PickPocket was sent again too quickly.", zone->GetShortName());
return;
}
PickPocket_Struct* pick_in = (PickPocket_Struct*)app->pBuffer;
Mob* victim = entity_list.GetMob(pick_in->to);
if (!victim)
return;
p_timers.Start(pTimerBeggingPickPocket, 8);
// respond with an empty pick pocket result; if we do not send this packet
// the client will lock up and require the player to relog.
auto send_empty_result = [&](const char* reason) {
Message(0, reason);
auto outapp = new EQApplicationPacket(OP_PickPocket, sizeof(sPickPocket_Struct));
sPickPocket_Struct* pick_out = (sPickPocket_Struct*)outapp->pBuffer;
pick_out->coin = 0;
pick_out->from = victim->GetID();
pick_out->to = GetID();
pick_out->myskill = GetSkill(EQ::skills::SkillPickPockets);
pick_out->type = 0;
QueuePacket(outapp);
safe_delete(outapp);
};
if (victim == this)
send_empty_result("You catch yourself red-handed.");
else if (victim->GetOwnerID())
send_empty_result("You cannot steal from pets!");
else if (victim->IsNPC())
victim->CastToNPC()->PickPocket(this);
else
send_empty_result("Stealing from clients not yet supported.");
}
void Client::Handle_OP_PopupResponse(const EQApplicationPacket *app)
{
if (app->size != sizeof(PopupResponse_Struct)) {
LogDebug("Size mismatch in OP_PopupResponse expected [{}] got [{}]", sizeof(PopupResponse_Struct), app->size);
DumpPacket(app);
return;
}
PopupResponse_Struct *popup_response = (PopupResponse_Struct *) app->pBuffer;
/**
* Handle any EQEmu defined popup Ids first
*/
std::string response;
switch (popup_response->popupid) {
case POPUPID_UPDATE_SHOWSTATSWINDOW:
if (GetTarget() && GetTarget()->IsClient()) {
GetTarget()->CastToClient()->SendStatsWindow(this, true);
}
else {
SendStatsWindow(this, true);
}
return;
case POPUPID_DIAWIND_ONE:
if (EntityVariableExists(DIAWIND_RESPONSE_ONE_KEY.c_str())) {
response = GetEntityVariable(DIAWIND_RESPONSE_ONE_KEY.c_str());
if (!response.empty()) {
ChannelMessageReceived(8, 0, 100, response.c_str());
}
}
break;
case POPUPID_DIAWIND_TWO:
if (EntityVariableExists(DIAWIND_RESPONSE_TWO_KEY.c_str())) {
response = GetEntityVariable(DIAWIND_RESPONSE_TWO_KEY.c_str());
if (!response.empty()) {
ChannelMessageReceived(8, 0, 100, response.c_str());
}
}
break;
case EQ::popupresponse::MOB_INFO_DISMISS:
SetDisplayMobInfoWindow(false);
Message(Chat::Yellow, "[DevTools] Window snoozed in this zone...");
break;
default:
break;
}
char buf[16];
sprintf(buf, "%d", popup_response->popupid);
parse->EventPlayer(EVENT_POPUP_RESPONSE, this, buf, 0);
Mob *Target = GetTarget();
if (Target && Target->IsNPC()) {
parse->EventNPC(EVENT_POPUP_RESPONSE, Target->CastToNPC(), this, buf, 0);
}
}
void Client::Handle_OP_PotionBelt(const EQApplicationPacket *app)
{
if (app->size != sizeof(MovePotionToBelt_Struct)) {
LogDebug("Size mismatch in OP_PotionBelt expected [{}] got [{}]", sizeof(MovePotionToBelt_Struct), app->size);
DumpPacket(app);
return;
}
MovePotionToBelt_Struct *mptbs = (MovePotionToBelt_Struct*)app->pBuffer;
if (!EQ::ValueWithin(mptbs->SlotNumber, 0U, 3U)) {
LogDebug("Client::Handle_OP_PotionBelt mptbs->SlotNumber out of range");
return;
}
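// Action 0 adds the item to the requested belt slot; any other action clears that slot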
if (mptbs->Action == 0) {
const EQ::ItemData *BaseItem = database.GetItem(mptbs->ItemID);
if (BaseItem) {
m_pp.potionbelt.Items[mptbs->SlotNumber].ID = BaseItem->ID;
m_pp.potionbelt.Items[mptbs->SlotNumber].Icon = BaseItem->Icon;
strn0cpy(m_pp.potionbelt.Items[mptbs->SlotNumber].Name, BaseItem->Name, sizeof(m_pp.potionbelt.Items[mptbs->SlotNumber].Name));
database.SaveCharacterPotionBelt(this->CharacterID(), mptbs->SlotNumber, m_pp.potionbelt.Items[mptbs->SlotNumber].ID, m_pp.potionbelt.Items[mptbs->SlotNumber].Icon);
}
}
else {
m_pp.potionbelt.Items[mptbs->SlotNumber].ID = 0;
m_pp.potionbelt.Items[mptbs->SlotNumber].Icon = 0;
m_pp.potionbelt.Items[mptbs->SlotNumber].Name[0] = '\0';
}
}
void Client::Handle_OP_PurchaseLeadershipAA(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
LogDebug("Size mismatch in OP_LeadershipExpToggle expected [{}] got [{}]", 1, app->size);
DumpPacket(app);
return;
}
uint32 aaid = *((uint32 *)app->pBuffer);
if (aaid >= _maxLeaderAA)
return;
uint32 current_rank = m_pp.leader_abilities.ranks[aaid];
if (current_rank >= MAX_LEADERSHIP_TIERS) {
Message(Chat::Red, "This ability can be trained no further.");
return;
}
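// LeadershipAACosts[ability][rank] holds the point cost of the next rank; a cost of 0 marks the final rank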
uint8 cost = LeadershipAACosts[aaid][current_rank];
if (cost == 0) {
Message(Chat::Red, "This ability can be trained no further.");
return;
}
//TODO: we need to enforce prerequisites
if (aaid >= raidAAMarkNPC) {
//it is a raid ability.
if (cost > m_pp.raid_leadership_points) {
Message(Chat::Red, "You do not have enough points to purchase this ability.");
return;
}
//sell them the ability.
m_pp.raid_leadership_points -= cost;
m_pp.leader_abilities.ranks[aaid]++;
database.SaveCharacterLeadershipAA(this->CharacterID(), &m_pp);
}
else {
//it is a group ability.
if (cost > m_pp.group_leadership_points) {
Message(Chat::Red, "You do not have enough points to purchase this ability.");
return;
}
//sell them the ability.
m_pp.group_leadership_points -= cost;
m_pp.leader_abilities.ranks[aaid]++;
database.SaveCharacterLeadershipAA(this->CharacterID(), &m_pp);
}
//success, send them an update
auto outapp = new EQApplicationPacket(OP_UpdateLeadershipAA, sizeof(UpdateLeadershipAA_Struct));
UpdateLeadershipAA_Struct *u = (UpdateLeadershipAA_Struct *)outapp->pBuffer;
u->ability_id = aaid;
u->new_rank = m_pp.leader_abilities.ranks[aaid];
if (aaid >= raidAAMarkNPC) // raid AA
u->pointsleft = m_pp.raid_leadership_points;
else // group AA
u->pointsleft = m_pp.group_leadership_points;
FastQueuePacket(&outapp);
// Update all group members with the new AA the leader has purchased.
if (IsRaidGrouped()) {
Raid *r = GetRaid();
if (!r)
return;
if (aaid >= raidAAMarkNPC) {
r->UpdateRaidAAs();
r->SendAllRaidLeadershipAA();
}
else {
uint32 gid = r->GetGroup(this);
r->UpdateGroupAAs(gid);
r->GroupUpdate(gid, false);
}
}
else if (IsGrouped()) {
Group *g = GetGroup();
if (!g)
return;
g->UpdateGroupAAs();
g->SendLeadershipAAUpdate();
}
}
void Client::Handle_OP_PVPLeaderBoardDetailsRequest(const EQApplicationPacket *app)
{
// This opcode is sent by the client when the player right clicks a name on the PVP leaderboard
// to request further details about the selected player, e.g. Race/Class/AAs/Guild etc.
//
if (app->size != sizeof(PVPLeaderBoardDetailsRequest_Struct))
{
LogDebug("Size mismatch in OP_PVPLeaderBoardDetailsRequest expected [{}] got [{}]", sizeof(PVPLeaderBoardDetailsRequest_Struct), app->size);
DumpPacket(app);
return;
}
auto outapp = new EQApplicationPacket(OP_PVPLeaderBoardDetailsReply, sizeof(PVPLeaderBoardDetailsReply_Struct));
PVPLeaderBoardDetailsReply_Struct *pvplbdrs = (PVPLeaderBoardDetailsReply_Struct *)outapp->pBuffer;
// TODO: Record and send this data.
QueuePacket(outapp);
safe_delete(outapp);
}
void Client::Handle_OP_PVPLeaderBoardRequest(const EQApplicationPacket *app)
{
// This Opcode is sent by the client when the Leaderboard button on the PVP Stats window is pressed.
//
// It has a single uint32 payload which is the sort method:
//
// PVPSortByKills = 0, PVPSortByPoints = 1, PVPSortByInfamy = 2
//
if (app->size != sizeof(PVPLeaderBoardRequest_Struct))
{
LogDebug("Size mismatch in OP_PVPLeaderBoardRequest expected [{}] got [{}]", sizeof(PVPLeaderBoardRequest_Struct), app->size);
DumpPacket(app);
return;
}
/*PVPLeaderBoardRequest_Struct *pvplbrs = (PVPLeaderBoardRequest_Struct *)app->pBuffer;*/ //unused
auto outapp = new EQApplicationPacket(OP_PVPLeaderBoardReply, sizeof(PVPLeaderBoard_Struct));
/*PVPLeaderBoard_Struct *pvplb = (PVPLeaderBoard_Struct *)outapp->pBuffer;*/ //unused
// TODO: Record and send this data.
QueuePacket(outapp);
safe_delete(outapp);
}
void Client::Handle_OP_QueryUCSServerStatus(const EQApplicationPacket *app)
{
if (zone->IsUCSServerAvailable()) {
EQApplicationPacket* outapp = nullptr;
std::string buffer;
std::string MailKey = database.GetMailKey(CharacterID(), true);
EQ::versions::UCSVersion ConnectionType = EQ::versions::ucsUnknown;
// chat server packet
switch (ClientVersion()) {
case EQ::versions::ClientVersion::Titanium:
ConnectionType = EQ::versions::ucsTitaniumChat;
break;
case EQ::versions::ClientVersion::SoF:
ConnectionType = EQ::versions::ucsSoFCombined;
break;
case EQ::versions::ClientVersion::SoD:
ConnectionType = EQ::versions::ucsSoDCombined;
break;
case EQ::versions::ClientVersion::UF:
ConnectionType = EQ::versions::ucsUFCombined;
break;
case EQ::versions::ClientVersion::RoF:
ConnectionType = EQ::versions::ucsRoFCombined;
break;
case EQ::versions::ClientVersion::RoF2:
ConnectionType = EQ::versions::ucsRoF2Combined;
break;
default:
ConnectionType = EQ::versions::ucsUnknown;
break;
}
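// buffer format: host,port,worldshortname.charname,<connection type char><mail key>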
buffer = StringFormat("%s,%i,%s.%s,%c%s",
Config->ChatHost.c_str(),
Config->ChatPort,
Config->ShortName.c_str(),
GetName(),
ConnectionType,
MailKey.c_str()
);
outapp = new EQApplicationPacket(OP_SetChatServer, (buffer.length() + 1));
memcpy(outapp->pBuffer, buffer.c_str(), buffer.length());
outapp->pBuffer[buffer.length()] = '\0';
QueuePacket(outapp);
safe_delete(outapp);
// mail server packet
switch (ClientVersion()) {
case EQ::versions::ClientVersion::Titanium:
ConnectionType = EQ::versions::ucsTitaniumMail;
break;
default:
// retain value from previous switch
break;
}
buffer = StringFormat("%s,%i,%s.%s,%c%s",
Config->MailHost.c_str(),
Config->MailPort,
Config->ShortName.c_str(),
GetName(),
ConnectionType,
MailKey.c_str()
);
outapp = new EQApplicationPacket(OP_SetChatServer2, (buffer.length() + 1));
memcpy(outapp->pBuffer, buffer.c_str(), buffer.length());
outapp->pBuffer[buffer.length()] = '\0';
QueuePacket(outapp);
safe_delete(outapp);
}
}
void Client::Handle_OP_RaidCommand(const EQApplicationPacket *app)
{
if (app->size < sizeof(RaidGeneral_Struct)) {
LogError("Wrong size: OP_RaidCommand, size=[{}], expected at least [{}]", app->size, sizeof(RaidGeneral_Struct));
DumpPacket(app);
return;
}
RaidGeneral_Struct *raid_command_packet = (RaidGeneral_Struct*)app->pBuffer;
switch (raid_command_packet->action)
{
case RaidCommandInviteIntoExisting:
case RaidCommandInvite: {
Client *player_to_invite = entity_list.GetClientByName(raid_command_packet->player_name);
if (!player_to_invite)
break;
Group *player_to_invite_group = player_to_invite->GetGroup();
if (player_to_invite->HasRaid()) {
Message(Chat::Red, "%s is already in a raid.", player_to_invite->GetName());
break;
}
if (player_to_invite_group && player_to_invite_group->IsGroupMember(this)) {
MessageString(Chat::Red, ALREADY_IN_PARTY);
break;
}
if (player_to_invite_group && !player_to_invite_group->IsLeader(player_to_invite)) {
Message(Chat::Red, "You can only invite an ungrouped player or group leader to join your raid.");
break;
}
/* Send out invite to the client */
auto outapp = new EQApplicationPacket(OP_RaidUpdate, sizeof(RaidGeneral_Struct));
RaidGeneral_Struct *raid_command = (RaidGeneral_Struct*)outapp->pBuffer;
strn0cpy(raid_command->leader_name, raid_command_packet->leader_name, 64);
strn0cpy(raid_command->player_name, raid_command_packet->player_name, 64);
raid_command->parameter = 0;
raid_command->action = 20;
player_to_invite->QueuePacket(outapp);
safe_delete(outapp);
break;
}
case RaidCommandAcceptInvite: {
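// Four paths below, depending on whether the accepting player and the inviter
// each already have a group: existing groups are folded into the raid member
// by member and then disbanded as standalone groups.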
Client *player_accepting_invite = entity_list.GetClientByName(raid_command_packet->player_name);
if (player_accepting_invite) {
if (IsRaidGrouped()) {
player_accepting_invite->MessageString(Chat::White, ALREADY_IN_RAID, GetName()); //group failed, must invite members not in raid...
return;
}
Raid *raid = entity_list.GetRaidByClient(player_accepting_invite);
if (raid) {
raid->VerifyRaid();
Group *group = GetGroup();
if (group) {
if (group->GroupCount() + raid->RaidCount() > MAX_RAID_MEMBERS) {
player_accepting_invite->Message(Chat::Red, "Invite failed, group invite would create a raid larger than the maximum number of members allowed.");
return;
}
}
else {
if (1 + raid->RaidCount() > MAX_RAID_MEMBERS) {
player_accepting_invite->Message(Chat::Red, "Invite failed, member invite would create a raid larger than the maximum number of members allowed.");
return;
}
}
if (group) {//add us all
uint32 free_group_id = raid->GetFreeGroup();
Client *addClient = nullptr;
for (int x = 0; x < 6; x++) {
if (group->members[x]) {
Client *c = nullptr;
if (group->members[x]->IsClient())
c = group->members[x]->CastToClient();
else
continue;
if (!addClient)
{
addClient = c;
raid->SetGroupLeader(addClient->GetName());
}
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
if (group->IsLeader(group->members[x]))
raid->AddMember(c, free_group_id, false, true);
else
raid->AddMember(c, free_group_id);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
}
group->JoinRaidXTarget(raid);
group->DisbandGroup(true);
raid->GroupUpdate(free_group_id);
}
else {
raid->SendRaidCreate(this);
raid->SendMakeLeaderPacketTo(raid->leadername, this);
raid->AddMember(this);
raid->SendBulkRaid(this);
if (raid->IsLocked()) {
raid->SendRaidLockTo(this);
}
}
}
else
{
Group *player_invited_group = player_accepting_invite->GetGroup();
Group *group = GetGroup();
if (group) //if our target has a group
{
raid = new Raid(player_accepting_invite);
entity_list.AddRaid(raid);
raid->SetRaidDetails();
uint32 raid_free_group_id = raid->GetFreeGroup();
/* If we already have a group then cycle through adding us... */
if (player_invited_group) {
Client *client_to_be_leader = nullptr;
for (int x = 0; x < 6; x++) {
if (player_invited_group->members[x]) {
if (!client_to_be_leader) {
if (player_invited_group->members[x]->IsClient()) {
client_to_be_leader = player_invited_group->members[x]->CastToClient();
raid->SetGroupLeader(client_to_be_leader->GetName());
}
}
if (player_invited_group->IsLeader(player_invited_group->members[x])) {
Client *c = nullptr;
if (player_invited_group->members[x]->IsClient())
c = player_invited_group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, raid_free_group_id, true, true, true);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
else {
Client *c = nullptr;
if (player_invited_group->members[x]->IsClient())
c = player_invited_group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, raid_free_group_id);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
}
}
player_invited_group->JoinRaidXTarget(raid, true);
player_invited_group->DisbandGroup(true);
raid->GroupUpdate(raid_free_group_id);
raid_free_group_id = raid->GetFreeGroup();
}
else {
raid->SendRaidCreate(player_accepting_invite);
raid->AddMember(player_accepting_invite, 0xFFFFFFFF, true, false, true);
}
Client *client_to_add = nullptr;
/* Add client to an existing group */
for (int x = 0; x < 6; x++) {
if (group->members[x]) {
if (!client_to_add) {
if (group->members[x]->IsClient()) {
client_to_add = group->members[x]->CastToClient();
raid->SetGroupLeader(client_to_add->GetName());
}
}
if (group->IsLeader(group->members[x])) {
Client *c = nullptr;
if (group->members[x]->IsClient())
c = group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, raid_free_group_id, false, true);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
else
{
Client *c = nullptr;
if (group->members[x]->IsClient())
c = group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, raid_free_group_id);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
}
}
group->JoinRaidXTarget(raid);
group->DisbandGroup(true);
raid->GroupUpdate(raid_free_group_id);
}
/* Target does not have a group */
else {
if (player_invited_group) {
raid = new Raid(player_accepting_invite);
entity_list.AddRaid(raid);
raid->SetRaidDetails();
Client *addClientig = nullptr;
for (int x = 0; x < 6; x++) {
if (player_invited_group->members[x]) {
if (!addClientig) {
if (player_invited_group->members[x]->IsClient()) {
addClientig = player_invited_group->members[x]->CastToClient();
raid->SetGroupLeader(addClientig->GetName());
}
}
if (player_invited_group->IsLeader(player_invited_group->members[x])) {
Client *c = nullptr;
if (player_invited_group->members[x]->IsClient())
c = player_invited_group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, 0, true, true, true);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
else
{
Client *c = nullptr;
if (player_invited_group->members[x]->IsClient())
c = player_invited_group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, 0);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
}
}
raid->SendRaidCreate(this);
raid->SendMakeLeaderPacketTo(raid->leadername, this);
raid->SendBulkRaid(this);
player_invited_group->JoinRaidXTarget(raid, true);
raid->AddMember(this);
player_invited_group->DisbandGroup(true);
raid->GroupUpdate(0);
if (raid->IsLocked()) {
raid->SendRaidLockTo(this);
}
}
else { // neither has a group
raid = new Raid(player_accepting_invite);
entity_list.AddRaid(raid);
raid->SetRaidDetails();
raid->SendRaidCreate(player_accepting_invite);
raid->SendRaidCreate(this);
raid->SendMakeLeaderPacketTo(raid->leadername, this);
raid->AddMember(player_accepting_invite, 0xFFFFFFFF, true, false, true);
raid->SendBulkRaid(this);
raid->AddMember(this);
if (raid->IsLocked()) {
raid->SendRaidLockTo(this);
}
}
}
}
}
break;
}
case RaidCommandDisband: {
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
uint32 group = raid->GetGroup(raid_command_packet->leader_name);
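// group numbers 0-11 are real raid groups; anything else means the member is in the ungrouped bank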
if (group < 12) {
uint32 i = raid->GetPlayerIndex(raid_command_packet->leader_name);
if (raid->members[i].IsGroupLeader) { //assign group leader to someone else
for (int x = 0; x < MAX_RAID_MEMBERS; x++) {
if (strlen(raid->members[x].membername) > 0 && i != x) {
if (raid->members[x].GroupNumber == group) {
raid->SetGroupLeader(raid_command_packet->leader_name, false);
raid->SetGroupLeader(raid->members[x].membername);
raid->UpdateGroupAAs(group);
break;
}
}
}
}
if (raid->members[i].IsRaidLeader) {
for (int x = 0; x < MAX_RAID_MEMBERS; x++) {
if (strlen(raid->members[x].membername) > 0 && strcmp(raid->members[x].membername, raid->members[i].membername) != 0)
{
raid->SetRaidLeader(raid->members[i].membername, raid->members[x].membername);
raid->UpdateRaidAAs();
raid->SendAllRaidLeadershipAA();
break;
}
}
}
}
raid->RemoveMember(raid_command_packet->leader_name);
Client *c = entity_list.GetClientByName(raid_command_packet->leader_name);
if (c)
raid->SendGroupDisband(c);
else {
auto pack =
new ServerPacket(ServerOP_RaidGroupDisband, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct* rga = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
rga->rid = GetID();
rga->zoneid = zone->GetZoneID();
rga->instance_id = zone->GetInstanceID();
strn0cpy(rga->playername, raid_command_packet->leader_name, 64);
worldserver.SendPacket(pack);
safe_delete(pack);
}
raid->GroupUpdate(group);
}
break;
}
case RaidCommandMoveGroup:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
/* Moving to group */
if (raid_command_packet->parameter < 12) {
uint8 group_count = raid->GroupCount(raid_command_packet->parameter);
if (group_count < 6) {
Client *c = entity_list.GetClientByName(raid_command_packet->leader_name);
uint32 old_group = raid->GetGroup(raid_command_packet->leader_name);
if (raid_command_packet->parameter == old_group) // don't re-join the group we are already in
break;
if (raid->members[raid->GetPlayerIndex(raid_command_packet->leader_name)].IsGroupLeader) {
raid->SetGroupLeader(raid_command_packet->leader_name, false);
/* We were the leader of our old group */
if (old_group < 12) {
/* Assign new group leader if we can */
for (int x = 0; x < MAX_RAID_MEMBERS; x++) {
if (raid->members[x].GroupNumber == old_group) {
if (strcmp(raid_command_packet->leader_name, raid->members[x].membername) != 0 && strlen(raid_command_packet->leader_name) > 0) {
raid->SetGroupLeader(raid->members[x].membername);
raid->UpdateGroupAAs(old_group);
Client *client_to_update = entity_list.GetClientByName(raid->members[x].membername);
if (client_to_update) {
raid->SendRaidRemove(raid->members[x].membername, client_to_update);
raid->SendRaidCreate(client_to_update);
raid->SendMakeLeaderPacketTo(raid->leadername, client_to_update);
raid->SendRaidAdd(raid->members[x].membername, client_to_update);
raid->SendBulkRaid(client_to_update);
if (raid->IsLocked()) {
raid->SendRaidLockTo(client_to_update);
}
}
else {
auto pack = new ServerPacket(ServerOP_RaidChangeGroup, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct *raid_command = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
raid_command->rid = raid->GetID();
raid_command->zoneid = zone->GetZoneID();
raid_command->instance_id = zone->GetInstanceID();
strn0cpy(raid_command->playername, raid->members[x].membername, 64);
worldserver.SendPacket(pack);
safe_delete(pack);
}
break;
}
}
}
}
}
if (group_count == 0) {
raid->SetGroupLeader(raid_command_packet->leader_name);
raid->UpdateGroupAAs(raid_command_packet->parameter);
}
raid->MoveMember(raid_command_packet->leader_name, raid_command_packet->parameter);
if (c) {
raid->SendGroupDisband(c);
}
else {
auto pack = new ServerPacket(ServerOP_RaidGroupDisband, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct* raid_command = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
raid_command->rid = raid->GetID();
raid_command->zoneid = zone->GetZoneID();
raid_command->instance_id = zone->GetInstanceID();
strn0cpy(raid_command->playername, raid_command_packet->leader_name, 64);
worldserver.SendPacket(pack);
safe_delete(pack);
}
/* Send group update to our new group */
raid->GroupUpdate(raid_command_packet->parameter);
/* If our old was a group send update there too */
if (old_group < 12)
raid->GroupUpdate(old_group);
}
}
/* Move player to ungrouped bank */
else {
Client *c = entity_list.GetClientByName(raid_command_packet->leader_name);
uint32 oldgrp = raid->GetGroup(raid_command_packet->leader_name);
if (raid->members[raid->GetPlayerIndex(raid_command_packet->leader_name)].IsGroupLeader) {
raid->SetGroupLeader(raid_command_packet->leader_name, false);
for (int x = 0; x < MAX_RAID_MEMBERS; x++) {
if (raid->members[x].GroupNumber == oldgrp && strlen(raid->members[x].membername) > 0 && strcmp(raid->members[x].membername, raid_command_packet->leader_name) != 0){
raid->SetGroupLeader(raid->members[x].membername);
raid->UpdateGroupAAs(oldgrp);
Client *client_leaving_group = entity_list.GetClientByName(raid->members[x].membername);
if (client_leaving_group) {
raid->SendRaidRemove(raid->members[x].membername, client_leaving_group);
raid->SendRaidCreate(client_leaving_group);
raid->SendMakeLeaderPacketTo(raid->leadername, client_leaving_group);
raid->SendRaidAdd(raid->members[x].membername, client_leaving_group);
raid->SendBulkRaid(client_leaving_group);
if (raid->IsLocked()) {
raid->SendRaidLockTo(client_leaving_group);
}
}
else {
auto pack = new ServerPacket( ServerOP_RaidChangeGroup, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct *raid_command = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
raid_command->rid = raid->GetID();
strn0cpy(raid_command->playername, raid->members[x].membername, 64);
raid_command->zoneid = zone->GetZoneID();
raid_command->instance_id = zone->GetInstanceID();
worldserver.SendPacket(pack);
safe_delete(pack);
}
break;
}
}
}
raid->MoveMember(raid_command_packet->leader_name, 0xFFFFFFFF);
if (c) {
raid->SendGroupDisband(c);
}
else {
auto pack = new ServerPacket(ServerOP_RaidGroupDisband, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct* raid_command = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
raid_command->rid = raid->GetID();
raid_command->zoneid = zone->GetZoneID();
raid_command->instance_id = zone->GetInstanceID();
strn0cpy(raid_command->playername, raid_command_packet->leader_name, 64);
worldserver.SendPacket(pack);
safe_delete(pack);
}
raid->GroupUpdate(oldgrp);
}
}
Client *client_moved = entity_list.GetClientByName(raid_command_packet->leader_name);
if (client_moved && client_moved->GetRaid()) {
client_moved->GetRaid()->SendHPManaEndPacketsTo(client_moved);
client_moved->GetRaid()->SendHPManaEndPacketsFrom(client_moved);
Log(Logs::General, Logs::HPUpdate,
"Client::Handle_OP_RaidCommand :: %s sending and recieving HP/Mana/End updates",
client_moved->GetCleanName()
);
}
break;
}
case RaidCommandRaidLock:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
if (!raid->IsLocked())
raid->LockRaid(true);
else
raid->SendRaidLockTo(this);
}
break;
}
case RaidCommandRaidUnlock:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid)
{
if (raid->IsLocked())
raid->LockRaid(false);
else
raid->SendRaidUnlockTo(this);
}
break;
}
case RaidCommandLootType2:
case RaidCommandLootType:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
Message(Chat::Yellow, "Loot type changed to: %d.", raid_command_packet->parameter);
raid->ChangeLootType(raid_command_packet->parameter);
}
break;
}
case RaidCommandAddLooter2:
case RaidCommandAddLooter:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
Message(Chat::Yellow, "Adding %s as a raid looter.", raid_command_packet->leader_name);
raid->AddRaidLooter(raid_command_packet->leader_name);
}
break;
}
case RaidCommandRemoveLooter2:
case RaidCommandRemoveLooter:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
Message(Chat::Yellow, "Removing %s as a raid looter.", raid_command_packet->leader_name);
raid->RemoveRaidLooter(raid_command_packet->leader_name);
}
break;
}
case RaidCommandMakeLeader:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
if (strcmp(raid->leadername, GetName()) == 0) {
raid->SetRaidLeader(GetName(), raid_command_packet->leader_name);
raid->UpdateRaidAAs();
raid->SendAllRaidLeadershipAA();
}
}
break;
}
case RaidCommandSetMotd:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (!raid)
break;
// we don't use the RaidGeneral here!
RaidMOTD_Struct *motd = (RaidMOTD_Struct *)app->pBuffer;
raid->SetRaidMOTD(std::string(motd->motd));
raid->SaveRaidMOTD();
raid->SendRaidMOTDToWorld();
break;
}
default: {
Message(Chat::Red, "Raid command (%d) NYI", raid_command_packet->action);
break;
}
}
}
void Client::Handle_OP_RandomReq(const EQApplicationPacket *app)
{
if (app->size != sizeof(RandomReq_Struct)) {
LogError("Wrong size: OP_RandomReq, size=[{}], expected [{}]", app->size, sizeof(RandomReq_Struct));
return;
}
const RandomReq_Struct* rndq = (const RandomReq_Struct*)app->pBuffer;
uint32 randLow = rndq->low > rndq->high ? rndq->high : rndq->low;
uint32 randHigh = rndq->low > rndq->high ? rndq->low : rndq->high;
uint32 randResult;
if (randLow == 0 && randHigh == 0)
{ // defaults
randLow = 0;
randHigh = 100;
}
randResult = zone->random.Int(randLow, randHigh);
auto outapp = new EQApplicationPacket(OP_RandomReply, sizeof(RandomReply_Struct));
RandomReply_Struct* rr = (RandomReply_Struct*)outapp->pBuffer;
rr->low = randLow;
rr->high = randHigh;
rr->result = randResult;
strcpy(rr->name, GetName());
entity_list.QueueCloseClients(this, outapp, false, 400);
safe_delete(outapp);
return;
}
void Client::Handle_OP_ReadBook(const EQApplicationPacket *app)
{
if (app->size != sizeof(BookRequest_Struct)) {
LogError("Wrong size: OP_ReadBook, size=[{}], expected [{}]", app->size, sizeof(BookRequest_Struct));
return;
}
BookRequest_Struct* book = (BookRequest_Struct*)app->pBuffer;
ReadBook(book);
if (ClientVersion() >= EQ::versions::ClientVersion::SoF)
{
EQApplicationPacket EndOfBook(OP_FinishWindow, 0);
QueuePacket(&EndOfBook);
}
return;
}
void Client::Handle_OP_RecipeAutoCombine(const EQApplicationPacket *app)
{
if (app->size != sizeof(RecipeAutoCombine_Struct)) {
LogError("Invalid size for RecipeAutoCombine_Struct: Expected: [{}], Got: [{}]",
sizeof(RecipeAutoCombine_Struct), app->size);
return;
}
RecipeAutoCombine_Struct* rac = (RecipeAutoCombine_Struct*)app->pBuffer;
Object::HandleAutoCombine(this, rac);
return;
}
void Client::Handle_OP_RecipeDetails(const EQApplicationPacket *app)
{
if (app->size < sizeof(uint32)) {
LogError("Invalid size for RecipeDetails Request: Expected: [{}], Got: [{}]",
sizeof(uint32), app->size);
return;
}
uint32 *recipe_id = (uint32*)app->pBuffer;
SendTradeskillDetails(*recipe_id);
return;
}
void Client::Handle_OP_RecipesFavorite(const EQApplicationPacket *app)
{
if (app->size != sizeof(TradeskillFavorites_Struct)) {
LogError("Invalid size for TradeskillFavorites_Struct: Expected: [{}], Got: [{}]",
sizeof(TradeskillFavorites_Struct), app->size);
return;
}
TradeskillFavorites_Struct* tsf = (TradeskillFavorites_Struct*)app->pBuffer;
LogDebug("Requested Favorites for: [{}] - [{}]\n", tsf->object_type, tsf->some_id);
// results show that object_type is combiner type
// some_id = 0 if world combiner, item number otherwise
// make where clause segment for container(s)
std::string containers;
uint32 combineObjectSlots;
if (tsf->some_id == 0) {
containers += StringFormat(" = %u ", tsf->object_type); // world combiner so no item number
combineObjectSlots = 10;
}
else {
containers += StringFormat(" in (%u, %u) ", tsf->object_type, tsf->some_id); // container in inventory
auto item = database.GetItem(tsf->some_id);
if (!item)
{
LogError("Invalid container ID: [{}]. GetItem returned null. Defaulting to BagSlots = 10.\n", tsf->some_id);
combineObjectSlots = 10;
}
else
{
combineObjectSlots = item->BagSlots;
}
}
std::string favoriteIDs; // comma-separated list of up to 500 favorite recipe IDs
bool first = true;
for (uint16 favoriteIndex = 0; favoriteIndex < 500; ++favoriteIndex) {
if (tsf->favorite_recipes[favoriteIndex] == 0)
continue;
if (first) {
favoriteIDs += StringFormat("%u", tsf->favorite_recipes[favoriteIndex]);
first = false;
}
else
favoriteIDs += StringFormat(",%u", tsf->favorite_recipes[favoriteIndex]);
}
if (first) //no favorites....
return;
// TODO: Clean this up
const std::string query = StringFormat(
SQL (
SELECT
tr.id,
tr.name,
tr.trivial,
SUM(tre.componentcount),
tr.tradeskill
FROM
tradeskill_recipe AS tr
LEFT JOIN tradeskill_recipe_entries AS tre ON tr.id = tre.recipe_id
WHERE
tr.enabled <> 0
AND tr.id IN (%s)
AND tr.must_learn & 0x20 <> 0x20
AND (
(
tr.must_learn & 0x3 <> 0
)
OR (tr.must_learn & 0x3 = 0)
)
%s
GROUP BY
tr.id
HAVING
sum(
if(
tre.item_id %s
AND tre.iscontainer > 0,
1,
0
)
) > 0
AND SUM(tre.componentcount) <= %u
LIMIT
100
),
favoriteIDs.c_str(),
ContentFilterCriteria::apply().c_str(),
containers.c_str(),
combineObjectSlots
);
SendTradeskillSearchResults(query, tsf->object_type, tsf->some_id);
}
void Client::Handle_OP_RecipesSearch(const EQApplicationPacket *app)
{
if (app->size != sizeof(RecipesSearch_Struct)) {
LogError(
"Invalid size for RecipesSearch_Struct: Expected: [{}], Got: [{}]",
sizeof(RecipesSearch_Struct),
app->size
);
return;
}
auto* p_recipes_search_struct = (RecipesSearch_Struct*)app->pBuffer;
p_recipes_search_struct->query[55] = '\0'; //just to be sure.
LogTradeskills(
"[Handle_OP_RecipesSearch] Requested search recipes for object_type [{}] some_id [{}]",
p_recipes_search_struct->object_type,
p_recipes_search_struct->some_id
);
char containers_where_clause[30];
uint32 combine_object_slots;
if (p_recipes_search_struct->some_id == 0) {
// world combiner so no item number
snprintf(containers_where_clause, 29, "= %u", p_recipes_search_struct->object_type);
combine_object_slots = 10;
}
else {
// container in inventory
snprintf(containers_where_clause, 29, "in (%u,%u)", p_recipes_search_struct->object_type, p_recipes_search_struct->some_id);
auto item = database.GetItem(p_recipes_search_struct->some_id);
if (!item) {
LogError(
"Invalid container ID: [{}]. GetItem returned null. Defaulting to BagSlots = 10.",
p_recipes_search_struct->some_id
);
combine_object_slots = 10;
}
else {
combine_object_slots = item->BagSlots;
}
}
std::string search_clause;
if (p_recipes_search_struct->query[0] != 0) {
char buf[120]; // must be at least twice the size of the query buffer for DoEscapeString
database.DoEscapeString(buf, p_recipes_search_struct->query, strlen(p_recipes_search_struct->query));
search_clause = StringFormat("name rlike '%s' AND", buf);
}
//arbitrary limit of 200 recipes, makes sense to me.
// TODO: Clean this up
std::string query = fmt::format(
SQL(
SELECT
tr.id,
tr.name,
tr.trivial,
SUM(tre.componentcount),
tr.tradeskill
FROM
tradeskill_recipe AS tr
LEFT JOIN tradeskill_recipe_entries AS tre ON tr.id = tre.recipe_id
WHERE
{} tr.trivial >= {}
AND tr.trivial <= {}
AND tr.enabled <> 0
AND tr.must_learn & 0x20 <> 0x20
AND (
(
tr.must_learn & 0x3 <> 0
)
OR (tr.must_learn & 0x3 = 0)
)
{}
GROUP BY
tr.id
HAVING
sum(
if (
tre.item_id {}
AND tre.iscontainer > 0,
1,
0
)
) > 0
AND SUM(tre.componentcount) <= {}
LIMIT
200
),
search_clause,
p_recipes_search_struct->mintrivial,
p_recipes_search_struct->maxtrivial,
ContentFilterCriteria::apply(),
containers_where_clause,
combine_object_slots
);
SendTradeskillSearchResults(query, p_recipes_search_struct->object_type, p_recipes_search_struct->some_id);
}
void Client::Handle_OP_ReloadUI(const EQApplicationPacket *app)
{
if (IsInAGuild())
{
SendGuildRanks();
SendGuildMembers();
}
return;
}
void Client::Handle_OP_RemoveBlockedBuffs(const EQApplicationPacket *app)
{
if (!RuleB(Spells, EnableBlockedBuffs))
return;
if (app->size != sizeof(BlockedBuffs_Struct))
{
LogDebug("Size mismatch in OP_RemoveBlockedBuffs expected [{}] got [{}]", sizeof(BlockedBuffs_Struct), app->size);
DumpPacket(app);
return;
}
BlockedBuffs_Struct *bbs = (BlockedBuffs_Struct*)app->pBuffer;
std::set<uint32> *BlockedBuffs = bbs->Pet ? &PetBlockedBuffs : &PlayerBlockedBuffs;
std::set<uint32> RemovedBuffs;
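// reply with only the buffs that were actually found and removed so the client's blocked list stays in sync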
if (bbs->Count > 0)
{
std::set<uint32>::iterator Iterator;
auto outapp = new EQApplicationPacket(OP_RemoveBlockedBuffs, sizeof(BlockedBuffs_Struct));
BlockedBuffs_Struct *obbs = (BlockedBuffs_Struct*)outapp->pBuffer;
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
obbs->SpellID[i] = 0;
obbs->Pet = bbs->Pet;
obbs->Initialise = 0;
obbs->Flags = 0x5a;
for (unsigned int i = 0; i < bbs->Count; ++i)
{
Iterator = BlockedBuffs->find(bbs->SpellID[i]);
if (Iterator != BlockedBuffs->end())
{
RemovedBuffs.insert(bbs->SpellID[i]);
BlockedBuffs->erase(Iterator);
}
}
obbs->Count = RemovedBuffs.size();
Iterator = RemovedBuffs.begin();
unsigned int Element = 0;
while (Iterator != RemovedBuffs.end())
{
obbs->SpellID[Element++] = (*Iterator);
++Iterator;
}
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_RemoveTrap(const EQApplicationPacket *app)
{
if (app->size != 4) {// just an int
LogDebug("Size mismatch in OP_RemoveTrap expected 4 got [{}]", app->size);
DumpPacket(app);
return;
}
auto id = app->ReadUInt32(0);
bool good = false;
for (int i = 0; i < trap_mgr.count; ++i) {
if (trap_mgr.auras[i].spawn_id == id) {
good = true;
break;
}
}
if (good)
RemoveAura(id);
else
MessageString(Chat::SpellFailure, NOT_YOUR_TRAP); // pretty sure this was red
}
void Client::Handle_OP_Report(const EQApplicationPacket *app)
{
if (!CanUseReport)
{
MessageString(Chat::System, REPORT_ONCE);
return;
}
uint32 size = app->size;
uint32 current_point = 0;
std::string reported, reporter;
std::string current_string;
int mode = 0;
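// payload layout: reported_name|reporter_name|report_text\0 ; mode tracks which of the three fields is being read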
while (current_point < size)
{
if (mode < 2)
{
if (app->pBuffer[current_point] == '|')
{
mode++;
}
else
{
if (mode == 0)
{
reported += app->pBuffer[current_point];
}
else
{
reporter += app->pBuffer[current_point];
}
}
current_point++;
}
else
{
if (app->pBuffer[current_point] == 0x0a)
{
current_string += '\n';
}
else if (app->pBuffer[current_point] == 0x00)
{
CanUseReport = false;
database.AddReport(reporter, reported, current_string);
return;
}
else
{
current_string += app->pBuffer[current_point];
}
current_point++;
}
}
CanUseReport = false;
database.AddReport(reporter, reported, current_string);
}
void Client::Handle_OP_RequestDuel(const EQApplicationPacket *app)
{
if (app->size != sizeof(Duel_Struct))
return;
EQApplicationPacket* outapp = app->Copy();
Duel_Struct* ds = (Duel_Struct*)outapp->pBuffer;
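// swap initiator and target so the copied packet can be forwarded to the player being challenged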
uint32 duel = ds->duel_initiator;
ds->duel_initiator = ds->duel_target;
ds->duel_target = duel;
Entity* entity = entity_list.GetID(ds->duel_target);
if (!entity) {
safe_delete(outapp);
return;
}
if (GetID() != ds->duel_target && entity->IsClient() && (entity->CastToClient()->IsDueling() && entity->CastToClient()->GetDuelTarget() != 0)) {
MessageString(Chat::NPCQuestSay, DUEL_CONSIDERING, entity->GetName());
safe_delete(outapp);
return;
}
if (IsDueling()) {
MessageString(Chat::NPCQuestSay, DUEL_INPROGRESS);
safe_delete(outapp);
return;
}
if (GetID() != ds->duel_target && entity->IsClient() && GetDuelTarget() == 0 && !IsDueling() && !entity->CastToClient()->IsDueling() && entity->CastToClient()->GetDuelTarget() == 0) {
SetDuelTarget(ds->duel_target);
entity->CastToClient()->SetDuelTarget(GetID());
ds->duel_target = ds->duel_initiator;
entity->CastToClient()->FastQueuePacket(&outapp);
entity->CastToClient()->SetDueling(false);
SetDueling(false);
}
else
safe_delete(outapp);
return;
}
void Client::Handle_OP_RequestTitles(const EQApplicationPacket *app)
{
EQApplicationPacket *outapp = title_manager.MakeTitlesPacket(this);
if (outapp != nullptr)
FastQueuePacket(&outapp);
}
void Client::Handle_OP_RespawnWindow(const EQApplicationPacket *app)
{
// This opcode is sent by the client when the player chooses which bind to return to.
// The client sends just a 4 byte packet with the selection number in it
//
if (app->size != 4)
{
LogDebug("Size mismatch in OP_RespawnWindow expected [{}] got [{}]", 4, app->size);
DumpPacket(app);
return;
}
char *Buffer = (char *)app->pBuffer;
uint32 Option = VARSTRUCT_DECODE_TYPE(uint32, Buffer);
HandleRespawnFromHover(Option);
}
void Client::Handle_OP_Rewind(const EQApplicationPacket *app)
{
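// /rewind moves the player back to their stored rewind location; throttled to one use per 30 seconds via rewind_timer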
if ((rewind_timer.GetRemainingTime() > 1 && rewind_timer.Enabled())) {
MessageString(Chat::System, REWIND_WAIT);
}
else {
CastToClient()->MovePC(zone->GetZoneID(), zone->GetInstanceID(), m_RewindLocation.x, m_RewindLocation.y, m_RewindLocation.z, 0, 2, Rewind);
rewind_timer.Start(30000, true);
}
}
void Client::Handle_OP_RezzAnswer(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_RezzAnswer, app, Resurrect_Struct);
const Resurrect_Struct* ra = (const Resurrect_Struct*)app->pBuffer;
LogSpells("Received OP_RezzAnswer from client. Pendingrezzexp is [{}], action is [{}]",
PendingRezzXP, ra->action ? "ACCEPT" : "DECLINE");
OPRezzAnswer(ra->action, ra->spellid, ra->zone_id, ra->instance_id, ra->x, ra->y, ra->z);
if (ra->action == 1)
{
EQApplicationPacket* outapp = app->Copy();
// Send the OP_RezzComplete to the world server. This finds its way to the zone that
// the rezzed corpse is in to mark the corpse as rezzed.
outapp->SetOpcode(OP_RezzComplete);
worldserver.RezzPlayer(outapp, 0, 0, OP_RezzComplete);
safe_delete(outapp);
}
return;
}
void Client::Handle_OP_Sacrifice(const EQApplicationPacket *app)
{
if (app->size != sizeof(Sacrifice_Struct)) {
LogDebug("Size mismatch in OP_Sacrifice expected [{}] got [{}]", sizeof(Sacrifice_Struct), app->size);
DumpPacket(app);
return;
}
Sacrifice_Struct *ss = (Sacrifice_Struct*)app->pBuffer;
if (!PendingSacrifice) {
LogError("Unexpected OP_Sacrifice reply");
DumpPacket(app);
return;
}
if (ss->Confirm) {
Client *Caster = entity_list.GetClientByName(SacrificeCaster.c_str());
if (Caster) Sacrifice(Caster);
}
PendingSacrifice = false;
SacrificeCaster.clear();
}
void Client::Handle_OP_SafeFallSuccess(const EQApplicationPacket *app) // bit of a misnomer, sent whenever safe fall is used (success or fail)
{
if (HasSkill(EQ::skills::SkillSafeFall)) //this should only get called if the client has safe fall, but just in case...
CheckIncreaseSkill(EQ::skills::SkillSafeFall, nullptr); //check for skill up
}
void Client::Handle_OP_SafePoint(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_Save(const EQApplicationPacket *app)
{
// The payload is 192 bytes - not sure what it contains.
Save();
return;
}
void Client::Handle_OP_SaveOnZoneReq(const EQApplicationPacket *app)
{
Handle_OP_Save(app);
}
void Client::Handle_OP_SelectTribute(const EQApplicationPacket *app)
{
LogTribute("Received OP_SelectTribute of length [{}]", app->size);
//we should enforce being near a real tribute master to change this
	//but I'm not sure how I want to do that right now.
if (app->size != sizeof(SelectTributeReq_Struct))
LogError("Invalid size on OP_SelectTribute packet");
else {
SelectTributeReq_Struct *t = (SelectTributeReq_Struct *)app->pBuffer;
SendTributeDetails(t->client_id, t->tribute_id);
}
return;
}
void Client::Handle_OP_SenseHeading(const EQApplicationPacket *app)
{
if (!HasSkill(EQ::skills::SkillSenseHeading))
return;
int chancemod = 0;
CheckIncreaseSkill(EQ::skills::SkillSenseHeading, nullptr, chancemod);
return;
}
void Client::Handle_OP_SenseTraps(const EQApplicationPacket *app)
{
if (!HasSkill(EQ::skills::SkillSenseTraps))
return;
if (!p_timers.Expired(&database, pTimerSenseTraps, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
int reuse = SenseTrapsReuseTime - GetSkillReuseTime(EQ::skills::SkillSenseTraps);
if (reuse < 1)
reuse = 1;
p_timers.Start(pTimerSenseTraps, reuse - 1);
float trap_curdist = 0;
Trap* trap = entity_list.FindNearbyTrap(this, 800, trap_curdist);
CheckIncreaseSkill(EQ::skills::SkillSenseTraps, nullptr);
if (trap && trap->skill > 0) {
int uskill = GetSkill(EQ::skills::SkillSenseTraps);
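		// Opposed roll: detection succeeds when (d100 + user skill) >= (d100 + 75% of trap skill).
		// e.g. with Sense Traps 100 against a trap of skill 80, the player needs roll + 100 >= roll + 60,
		// so detection is heavily favored (an illustrative reading of the check below).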
if ((zone->random.Int(0, 99) + uskill) >= (zone->random.Int(0, 99) + trap->skill*0.75))
{
auto diff = trap->m_Position - glm::vec3(GetPosition());
if (diff.x == 0 && diff.y == 0)
Message(Chat::Skills, "You sense a trap right under your feet!");
else if (diff.x > 10 && diff.y > 10)
Message(Chat::Skills, "You sense a trap to the NorthWest.");
else if (diff.x < -10 && diff.y > 10)
Message(Chat::Skills, "You sense a trap to the NorthEast.");
else if (diff.y > 10)
Message(Chat::Skills, "You sense a trap to the North.");
else if (diff.x > 10 && diff.y < -10)
Message(Chat::Skills, "You sense a trap to the SouthWest.");
else if (diff.x < -10 && diff.y < -10)
Message(Chat::Skills, "You sense a trap to the SouthEast.");
else if (diff.y < -10)
Message(Chat::Skills, "You sense a trap to the South.");
else if (diff.x > 10)
Message(Chat::Skills, "You sense a trap to the West.");
else
Message(Chat::Skills, "You sense a trap to the East.");
trap->detected = true;
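			// Convert the heading from CalculateHeadingToTarget (apparently on a 0-255 scale, with
			// negatives wrapped before doubling) into the 0-511 scale MovePC expects; scale assumed from this conversion.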
float angle = CalculateHeadingToTarget(trap->m_Position.x, trap->m_Position.y);
if (angle < 0)
angle = (256 + angle);
angle *= 2;
MovePC(zone->GetZoneID(), zone->GetInstanceID(), GetX(), GetY(), GetZ(), angle);
return;
}
}
Message(Chat::Skills, "You did not find any traps nearby.");
return;
}
void Client::Handle_OP_SetGuildMOTD(const EQApplicationPacket *app)
{
LogGuilds("Received OP_SetGuildMOTD");
if (app->size != sizeof(GuildMOTD_Struct)) {
		// client calls for a motd on login even if they aren't in a guild
		printf("Error: app size of %u != size of GuildMOTD_Struct of %zu\n", app->size, sizeof(GuildMOTD_Struct));
return;
}
if (!IsInAGuild()) {
Message(Chat::Red, "You are not in a guild!");
return;
}
if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_MOTD)) {
Message(Chat::Red, "You do not have permissions to edit your guild's MOTD.");
return;
}
GuildMOTD_Struct* gmotd = (GuildMOTD_Struct*)app->pBuffer;
LogGuilds("Setting MOTD for [{}] ([{}]) to: [{}] - [{}]",
guild_mgr.GetGuildName(GuildID()), GuildID(), GetName(), gmotd->motd);
if (!guild_mgr.SetGuildMOTD(GuildID(), gmotd->motd, GetName())) {
Message(0, "Motd update failed.");
}
return;
}
void Client::Handle_OP_SetRunMode(const EQApplicationPacket *app)
{
if (app->size < sizeof(SetRunMode_Struct)) {
LogError("Received invalid sized OP_SetRunMode: got [{}], expected [{}]", app->size, sizeof(SetRunMode_Struct));
DumpPacket(app);
return;
}
SetRunMode_Struct* rms = (SetRunMode_Struct*)app->pBuffer;
if (rms->mode)
runmode = true;
else
runmode = false;
return;
}
void Client::Handle_OP_SetServerFilter(const EQApplicationPacket *app)
{
if (app->size != sizeof(SetServerFilter_Struct)) {
LogError("Received invalid sized OP_SetServerFilter: got [{}], expected [{}]", app->size, sizeof(SetServerFilter_Struct));
DumpPacket(app);
return;
}
SetServerFilter_Struct* filter = (SetServerFilter_Struct*)app->pBuffer;
ServerFilter(filter);
return;
}
void Client::Handle_OP_SetStartCity(const EQApplicationPacket *app)
{
// if the character has a start city, don't let them use the command
if (m_pp.binds[4].zone_id != 0 && m_pp.binds[4].zone_id != 189) {
Message(Chat::Yellow, "Your home city has already been set.", m_pp.binds[4].zone_id, ZoneName(m_pp.binds[4].zone_id));
return;
}
if (app->size < 1) {
LogError("Wrong size: OP_SetStartCity, size=[{}], expected [{}]", app->size, 1);
DumpPacket(app);
return;
}
float x = 0.0f, y = 0.0f, z = 0.0f, heading = 0.0f;
uint32 zone_id = 0;
uint32 start_city = (uint32)strtol((const char*)app->pBuffer, nullptr, 10);
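	// The payload is the chosen city's zone id as an ASCII decimal string, e.g. "/setstartcity 42"
	// arrives here as "42" (example value illustrative).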
std::string query = fmt::format(
SQL(
SELECT
`zone_id`, `bind_id`, `x`, `y`, `z`, `heading`
FROM
`start_zones`
WHERE
player_class = {}
AND
player_deity = {}
AND
player_race = {} {}
),
m_pp.class_,
m_pp.deity,
m_pp.race,
ContentFilterCriteria::apply().c_str()
);
auto results = content_db.QueryDatabase(query);
if (!results.Success()) {
LogError("No valid start zones found for /setstartcity");
return;
}
bool valid_city = false;
for (auto row = results.begin(); row != results.end(); ++row) {
if (atoi(row[1]) != 0)
zone_id = atoi(row[1]);
else
zone_id = atoi(row[0]);
if (zone_id != start_city)
continue;
valid_city = true;
x = atof(row[2]);
y = atof(row[3]);
z = atof(row[4]);
heading = atof(row[5]);
}
if (valid_city) {
Message(Chat::Yellow, "Your home city has been set");
SetStartZone(start_city, x, y, z, heading);
return;
}
query = fmt::format(
SQL(
SELECT
`zone_id`, `bind_id`
FROM
`start_zones`
WHERE
player_class = {}
AND
player_deity = {}
AND
player_race = {}
),
m_pp.class_,
m_pp.deity,
m_pp.race
);
results = content_db.QueryDatabase(query);
if (!results.Success())
return;
Message(Chat::Yellow, "Use \"/setstartcity #\" to choose a home city from the following list:");
for (auto row = results.begin(); row != results.end(); ++row) {
if (atoi(row[1]) != 0)
zone_id = atoi(row[1]);
else
zone_id = atoi(row[0]);
std::string zone_long_name = zone_store.GetZoneLongName(zone_id);
Message(Chat::Yellow, "%d - %s", zone_id, zone_long_name.c_str());
}
}
void Client::Handle_OP_SetTitle(const EQApplicationPacket *app)
{
if (app->size != sizeof(SetTitle_Struct)) {
LogDebug("Size mismatch in OP_SetTitle expected [{}] got [{}]", sizeof(SetTitle_Struct), app->size);
DumpPacket(app);
return;
}
SetTitle_Struct *sts = (SetTitle_Struct *)app->pBuffer;
std::string Title;
if (!sts->is_suffix)
{
Title = title_manager.GetPrefix(sts->title_id);
SetAATitle(Title.c_str());
}
else
{
Title = title_manager.GetSuffix(sts->title_id);
SetTitleSuffix(Title.c_str());
}
}
void Client::Handle_OP_Shielding(const EQApplicationPacket *app)
{
/*
/shield command mechanics
Warriors get this skill at level 30
Used by typing /shield while targeting a player
	While active, for a baseline duration of 12 seconds, the 'shield target' will take 50 pct less damage and
	the 'shielder' will be hit with the damage taken by the 'shield target' after all applicable mitigation is calculated;
	the damage on the 'shielder' is reduced by 25 percent, and this reduction can be increased to 50 pct by equipping a shield.
	You receive a 1% increase in mitigation for every 2 AC on the shield.
	The shielder must stay within a close distance (15 units) of the 'shield target'. If either moves out of range, shield ends, no message given.
	Both duration and shield range can be modified by AA.
	Recast is 3 minutes.
	For custom use cases, Mob::ShieldAbility can be used in quests with all parameters being altered. This functionality
	is also used for SPA 201 SE_PetShield, which works in a similar manner with the pet shielding its owner.
	Note: If either the shielder or the shield target dies, all variables are reset on both.
*/
if (app->size != sizeof(Shielding_Struct)) {
LogError("OP size error: OP_Shielding expected:[{}] got:[{}]", sizeof(Shielding_Struct), app->size);
return;
}
if (GetLevel() < 30) { //Client gives message
return;
}
if (GetClass() != WARRIOR){
return;
}
pTimerType timer = pTimerShieldAbility;
if (!p_timers.Expired(&database, timer, false)) {
uint32 remain = p_timers.GetRemainingTime(timer);
Message(Chat::White, "You can use the ability /shield in %d minutes %d seconds.", ((remain) / 60), (remain % 60));
return;
}
Shielding_Struct* shield = (Shielding_Struct*)app->pBuffer;
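	// A sketch of how the values below map onto Mob::ShieldAbility's parameters (mapping
	// inferred from this call and the mechanics comment above, not from a documented signature):
	//   shield->target_id = entity to shield, 15 = max range in units, 12000 = duration in ms,
	//   50 = pct damage spared on the target, 25 = pct mitigation on the shielder,
	//   true = shield equip bonus can raise the 25 toward 50, false = not a pet shield (SPA 201).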
if (ShieldAbility(shield->target_id, 15, 12000, 50, 25, true, false)) {
p_timers.Start(timer, SHIELD_ABILITY_RECAST_TIME);
}
return;
}
void Client::Handle_OP_ShopEnd(const EQApplicationPacket *app)
{
EQApplicationPacket empty(OP_ShopEndConfirm);
QueuePacket(&empty);
return;
}
void Client::Handle_OP_ShopPlayerBuy(const EQApplicationPacket *app)
{
if (app->size != sizeof(Merchant_Sell_Struct)) {
LogError("Invalid size on OP_ShopPlayerBuy: Expected [{}], Got [{}]",
sizeof(Merchant_Sell_Struct), app->size);
return;
}
RDTSC_Timer t1;
t1.start();
Merchant_Sell_Struct* mp = (Merchant_Sell_Struct*)app->pBuffer;
#if EQDEBUG >= 5
LogDebug("[{}], purchase item", GetName());
DumpPacket(app);
#endif
int merchantid;
bool tmpmer_used = false;
Mob* tmp = entity_list.GetMob(mp->npcid);
if (tmp == 0 || !tmp->IsNPC() || tmp->GetClass() != MERCHANT)
return;
if (mp->quantity < 1) return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, tmp->GetPosition()) > USE_NPC_RANGE2)
return;
merchantid = tmp->CastToNPC()->MerchantType;
uint32 item_id = 0;
std::list<MerchantList> merlist = zone->merchanttable[merchantid];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
if (mp->itemslot == ml.slot) {
item_id = ml.item;
break;
}
}
const EQ::ItemData* item = nullptr;
uint32 prevcharges = 0;
if (item_id == 0) { //check to see if its on the temporary table
std::list<TempMerchantList> tmp_merlist = zone->tmpmerchanttable[tmp->GetNPCTypeID()];
std::list<TempMerchantList>::const_iterator tmp_itr;
TempMerchantList ml;
for (tmp_itr = tmp_merlist.begin(); tmp_itr != tmp_merlist.end(); ++tmp_itr) {
ml = *tmp_itr;
if (mp->itemslot == ml.slot) {
item_id = ml.item;
tmpmer_used = true;
prevcharges = ml.charges;
break;
}
}
}
item = database.GetItem(item_id);
if (!item) {
		//error finding item, client didn't get the update packet for whatever reason, roleplay a tad
Message(Chat::Yellow, "%s tells you 'Sorry, that item is for display purposes only.' as they take the item off the shelf.", tmp->GetCleanName());
auto delitempacket = new EQApplicationPacket(OP_ShopDelItem, sizeof(Merchant_DelItem_Struct));
Merchant_DelItem_Struct* delitem = (Merchant_DelItem_Struct*)delitempacket->pBuffer;
delitem->itemslot = mp->itemslot;
delitem->npcid = mp->npcid;
delitem->playerid = mp->playerid;
delitempacket->priority = 6;
		entity_list.QueueCloseClients(tmp, delitempacket); //queue for anyone that could be using the merchant so they see the update
safe_delete(delitempacket);
return;
}
if (CheckLoreConflict(item))
{
Message(Chat::Yellow, "You can only have one of a lore item.");
return;
}
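	// For temporary (player-sold) stock, clamp the purchase to the charges the merchant actually
	// holds; stale counts above MaxCharges collapse to MaxCharges first (intent inferred from the check below).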
if (tmpmer_used && (mp->quantity > prevcharges || item->MaxCharges > 1))
{
if (prevcharges > item->MaxCharges && item->MaxCharges > 1)
mp->quantity = item->MaxCharges;
else
mp->quantity = prevcharges;
}
// Item's stackable, but the quantity they want to buy exceeds the max stackable quantity.
if (item->Stackable && mp->quantity > item->StackSize)
mp->quantity = item->StackSize;
auto outapp = new EQApplicationPacket(OP_ShopPlayerBuy, sizeof(Merchant_Sell_Struct));
Merchant_Sell_Struct* mpo = (Merchant_Sell_Struct*)outapp->pBuffer;
mpo->quantity = mp->quantity;
mpo->playerid = mp->playerid;
mpo->npcid = mp->npcid;
mpo->itemslot = mp->itemslot;
int16 freeslotid = INVALID_INDEX;
int16 charges = 0;
if (item->Stackable || tmpmer_used)
charges = mp->quantity;
else if ( item->MaxCharges >= 1)
charges = item->MaxCharges;
EQ::ItemInstance* inst = database.CreateItem(item, charges);
int SinglePrice = 0;
if (RuleB(Merchant, UsePriceMod))
SinglePrice = (item->Price * (RuleR(Merchant, SellCostMod)) * item->SellRate * Client::CalcPriceMod(tmp, false));
else
SinglePrice = (item->Price * (RuleR(Merchant, SellCostMod)) * item->SellRate);
if (item->MaxCharges > 1)
mpo->price = SinglePrice;
else
mpo->price = SinglePrice * mp->quantity;
if (mpo->price < 0)
{
safe_delete(outapp);
safe_delete(inst);
return;
}
// this area needs some work..two inventory insertion check failure points
// below do not return player's money..is this the intended behavior?
if (!TakeMoneyFromPP(mpo->price))
{
auto hacker_str = fmt::format("Vendor Cheat: attempted to buy {} of {}: {} that cost {} cp but only has {} pp {} gp {} sp {} cp",
mpo->quantity, item->ID, item->Name,
mpo->price, m_pp.platinum, m_pp.gold, m_pp.silver, m_pp.copper);
database.SetMQDetectionFlag(AccountName(), GetName(), hacker_str, zone->GetShortName());
safe_delete(outapp);
safe_delete(inst);
return;
}
bool stacked = TryStacking(inst);
if (!stacked)
freeslotid = m_inv.FindFreeSlot(false, true, item->Size);
// shouldn't we be reimbursing if these two fail?
//make sure we are not completely full...
if (freeslotid == EQ::invslot::slotCursor) {
if (m_inv.GetItem(EQ::invslot::slotCursor) != nullptr) {
Message(Chat::Red, "You do not have room for any more items.");
safe_delete(outapp);
safe_delete(inst);
return;
}
}
if (!stacked && freeslotid == INVALID_INDEX)
{
Message(Chat::Red, "You do not have room for any more items.");
safe_delete(outapp);
safe_delete(inst);
return;
}
std::string packet;
if (!stacked && inst) {
PutItemInInventory(freeslotid, *inst);
SendItemPacket(freeslotid, inst, ItemPacketTrade);
}
else if (!stacked) {
LogError("OP_ShopPlayerBuy: item->ItemClass Unknown! Type: [{}]", item->ItemClass);
}
QueuePacket(outapp);
if (inst && tmpmer_used) {
int32 new_charges = prevcharges - mp->quantity;
zone->SaveTempItem(merchantid, tmp->GetNPCTypeID(), item_id, new_charges);
if (new_charges <= 0) {
auto delitempacket = new EQApplicationPacket(OP_ShopDelItem, sizeof(Merchant_DelItem_Struct));
Merchant_DelItem_Struct* delitem = (Merchant_DelItem_Struct*)delitempacket->pBuffer;
delitem->itemslot = mp->itemslot;
delitem->npcid = mp->npcid;
delitem->playerid = mp->playerid;
delitempacket->priority = 6;
			entity_list.QueueClients(tmp, delitempacket); //queue for anyone that could be using the merchant so they see the update
safe_delete(delitempacket);
}
else {
// Update the charges/quantity in the merchant window
inst->SetCharges(new_charges);
inst->SetPrice(SinglePrice);
inst->SetMerchantSlot(mp->itemslot);
inst->SetMerchantCount(new_charges);
SendItemPacket(mp->itemslot, inst, ItemPacketMerchant);
}
}
safe_delete(inst);
safe_delete(outapp);
// start QS code
// stacking purchases not supported at this time - entire process will need some work to catch them properly
if (RuleB(QueryServ, PlayerLogMerchantTransactions)) {
auto qspack =
new ServerPacket(ServerOP_QSPlayerLogMerchantTransactions,
sizeof(QSMerchantLogTransaction_Struct) + sizeof(QSTransactionItems_Struct));
QSMerchantLogTransaction_Struct* qsaudit = (QSMerchantLogTransaction_Struct*)qspack->pBuffer;
qsaudit->zone_id = zone->GetZoneID();
qsaudit->merchant_id = tmp->CastToNPC()->MerchantType;
qsaudit->merchant_money.platinum = 0;
qsaudit->merchant_money.gold = 0;
qsaudit->merchant_money.silver = 0;
qsaudit->merchant_money.copper = 0;
qsaudit->merchant_count = 1;
qsaudit->char_id = character_id;
qsaudit->char_money.platinum = (mpo->price / 1000);
qsaudit->char_money.gold = (mpo->price / 100) % 10;
qsaudit->char_money.silver = (mpo->price / 10) % 10;
qsaudit->char_money.copper = mpo->price % 10;
qsaudit->char_count = 0;
qsaudit->items[0].char_slot = freeslotid == INVALID_INDEX ? 0 : freeslotid;
qsaudit->items[0].item_id = item->ID;
qsaudit->items[0].charges = mpo->quantity;
const EQ::ItemInstance* audit_inst = m_inv[freeslotid];
if (audit_inst) {
qsaudit->items[0].aug_1 = audit_inst->GetAugmentItemID(0);
qsaudit->items[0].aug_2 = audit_inst->GetAugmentItemID(1);
qsaudit->items[0].aug_3 = audit_inst->GetAugmentItemID(2);
qsaudit->items[0].aug_4 = audit_inst->GetAugmentItemID(3);
qsaudit->items[0].aug_5 = audit_inst->GetAugmentItemID(4);
}
else {
qsaudit->items[0].aug_1 = 0;
qsaudit->items[0].aug_2 = 0;
qsaudit->items[0].aug_3 = 0;
qsaudit->items[0].aug_4 = 0;
qsaudit->items[0].aug_5 = 0;
if (freeslotid != INVALID_INDEX) {
LogError("Handle_OP_ShopPlayerBuy: QS Audit could not locate merchant ([{}]) purchased item in player ([{}]) inventory slot ([{}])",
qsaudit->merchant_id, qsaudit->char_id, freeslotid);
}
}
audit_inst = nullptr;
if (worldserver.Connected()) { worldserver.SendPacket(qspack); }
safe_delete(qspack);
}
// end QS code
if (RuleB(EventLog, RecordBuyFromMerchant))
LogMerchant(this, tmp, mpo->quantity, mpo->price, item, true);
if ((RuleB(Character, EnableDiscoveredItems)))
{
if (!GetGM() && !IsDiscovered(item_id))
DiscoverItem(item_id);
}
t1.stop();
std::cout << "At 1: " << t1.getDuration() << std::endl;
return;
}
void Client::Handle_OP_ShopPlayerSell(const EQApplicationPacket *app)
{
if (app->size != sizeof(Merchant_Purchase_Struct)) {
LogError("Invalid size on OP_ShopPlayerSell: Expected [{}], Got [{}]",
sizeof(Merchant_Purchase_Struct), app->size);
return;
}
RDTSC_Timer t1(true);
Merchant_Purchase_Struct* mp = (Merchant_Purchase_Struct*)app->pBuffer;
Mob* vendor = entity_list.GetMob(mp->npcid);
if (vendor == 0 || !vendor->IsNPC() || vendor->GetClass() != MERCHANT)
return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, vendor->GetPosition()) > USE_NPC_RANGE2)
return;
uint32 price = 0;
uint32 itemid = GetItemIDAt(mp->itemslot);
if (itemid == 0)
return;
const EQ::ItemData* item = database.GetItem(itemid);
EQ::ItemInstance* inst = GetInv().GetItem(mp->itemslot);
if (!item || !inst) {
Message(Chat::Red, "You seemed to have misplaced that item..");
return;
}
if (mp->quantity > 1)
{
if ((inst->GetCharges() < 0) || (mp->quantity > (uint32)inst->GetCharges()))
return;
}
if (!item->NoDrop) {
//Message(Chat::Red,"%s tells you, 'LOL NOPE'", vendor->GetName());
return;
}
uint32 cost_quantity = mp->quantity;
if (inst->IsCharged())
uint32 cost_quantity = 1;
uint32 i;
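	// The loops below accumulate a cumulative price per unit sold; the 4000000000 check is
	// (presumably) an overflow guard, since uint32 tops out just above 4.29 billion copper.
	// When the cap is hit, the sale quantity is clamped to however many units fit under it.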
if (RuleB(Merchant, UsePriceMod)) {
for (i = 1; i <= cost_quantity; i++) {
price = (uint32)((item->Price * i)*(RuleR(Merchant, BuyCostMod))*Client::CalcPriceMod(vendor, true) + 0.5); // need to round up, because client does it automatically when displaying price
if (price > 4000000000) {
cost_quantity = i;
mp->quantity = i;
break;
}
}
}
else {
for (i = 1; i <= cost_quantity; i++) {
price = (uint32)((item->Price * i)*(RuleR(Merchant, BuyCostMod)) + 0.5); // need to round up, because client does it automatically when displaying price
if (price > 4000000000) {
cost_quantity = i;
mp->quantity = i;
break;
}
}
}
AddMoneyToPP(price, false);
if (inst->IsStackable() || inst->IsCharged())
{
unsigned int i_quan = inst->GetCharges();
if (mp->quantity > i_quan || inst->IsCharged())
mp->quantity = i_quan;
}
else
mp->quantity = 1;
if (RuleB(EventLog, RecordSellToMerchant))
LogMerchant(this, vendor, mp->quantity, price, item, false);
int charges = mp->quantity;
int freeslot = 0;
if ((freeslot = zone->SaveTempItem(vendor->CastToNPC()->MerchantType, vendor->GetNPCTypeID(), itemid, charges, true)) > 0) {
EQ::ItemInstance* inst2 = inst->Clone();
while (true) {
if (inst2 == nullptr)
break;
if (RuleB(Merchant, UsePriceMod)) {
inst2->SetPrice(item->Price*(RuleR(Merchant, SellCostMod))*item->SellRate*Client::CalcPriceMod(vendor, false));
}
else
inst2->SetPrice(item->Price*(RuleR(Merchant, SellCostMod))*item->SellRate);
inst2->SetMerchantSlot(freeslot);
uint32 MerchantQuantity = zone->GetTempMerchantQuantity(vendor->GetNPCTypeID(), freeslot);
if (inst2->IsStackable()) {
inst2->SetCharges(MerchantQuantity);
}
inst2->SetMerchantCount(MerchantQuantity);
SendItemPacket(freeslot - 1, inst2, ItemPacketMerchant);
safe_delete(inst2);
break;
}
}
// start QS code
if (RuleB(QueryServ, PlayerLogMerchantTransactions)) {
auto qspack =
new ServerPacket(ServerOP_QSPlayerLogMerchantTransactions,
sizeof(QSMerchantLogTransaction_Struct) + sizeof(QSTransactionItems_Struct));
QSMerchantLogTransaction_Struct* qsaudit = (QSMerchantLogTransaction_Struct*)qspack->pBuffer;
qsaudit->zone_id = zone->GetZoneID();
qsaudit->merchant_id = vendor->CastToNPC()->MerchantType;
qsaudit->merchant_money.platinum = (price / 1000);
qsaudit->merchant_money.gold = (price / 100) % 10;
qsaudit->merchant_money.silver = (price / 10) % 10;
qsaudit->merchant_money.copper = price % 10;
qsaudit->merchant_count = 0;
qsaudit->char_id = character_id;
qsaudit->char_money.platinum = 0;
qsaudit->char_money.gold = 0;
qsaudit->char_money.silver = 0;
qsaudit->char_money.copper = 0;
qsaudit->char_count = 1;
qsaudit->items[0].char_slot = mp->itemslot;
qsaudit->items[0].item_id = itemid;
qsaudit->items[0].charges = charges;
qsaudit->items[0].aug_1 = m_inv[mp->itemslot]->GetAugmentItemID(1);
qsaudit->items[0].aug_2 = m_inv[mp->itemslot]->GetAugmentItemID(2);
qsaudit->items[0].aug_3 = m_inv[mp->itemslot]->GetAugmentItemID(3);
qsaudit->items[0].aug_4 = m_inv[mp->itemslot]->GetAugmentItemID(4);
qsaudit->items[0].aug_5 = m_inv[mp->itemslot]->GetAugmentItemID(5);
if (worldserver.Connected()) { worldserver.SendPacket(qspack); }
safe_delete(qspack);
}
// end QS code
// Now remove the item from the player, this happens regardless of outcome
if (!inst->IsStackable())
this->DeleteItemInInventory(mp->itemslot, 0, false);
else {
// HACK: DeleteItemInInventory uses int8 for quantity type. There is no consistent use of types in code in this path so for now iteratively delete from inventory.
if (mp->quantity > 255) {
uint32 temp = mp->quantity;
while (temp > 255 && temp != 0) {
// Delete chunks of 255
this->DeleteItemInInventory(mp->itemslot, 255, false);
temp -= 255;
}
if (temp != 0) {
// Delete remaining
this->DeleteItemInInventory(mp->itemslot, temp, false);
}
}
else {
this->DeleteItemInInventory(mp->itemslot, mp->quantity, false);
}
}
//This forces the price to show up correctly for charged items.
if (inst->IsCharged())
mp->quantity = 1;
auto outapp = new EQApplicationPacket(OP_ShopPlayerSell, sizeof(Merchant_Purchase_Struct));
Merchant_Purchase_Struct* mco = (Merchant_Purchase_Struct*)outapp->pBuffer;
mco->npcid = vendor->GetID();
mco->itemslot = mp->itemslot;
mco->quantity = mp->quantity;
mco->price = price;
QueuePacket(outapp);
safe_delete(outapp);
SendMoneyUpdate();
t1.start();
Save(1);
t1.stop();
std::cout << "Save took: " << t1.getDuration() << std::endl;
return;
}
void Client::Handle_OP_ShopRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(Merchant_Click_Struct)) {
LogError("Wrong size: OP_ShopRequest, size=[{}], expected [{}]", app->size, sizeof(Merchant_Click_Struct));
return;
}
Merchant_Click_Struct* mc = (Merchant_Click_Struct*)app->pBuffer;
// Send back opcode OP_ShopRequest - tells client to open merchant window.
//EQApplicationPacket* outapp = new EQApplicationPacket(OP_ShopRequest, sizeof(Merchant_Click_Struct));
//Merchant_Click_Struct* mco=(Merchant_Click_Struct*)outapp->pBuffer;
int merchantid = 0;
Mob* tmp = entity_list.GetMob(mc->npcid);
if (tmp == 0 || !tmp->IsNPC() || tmp->GetClass() != MERCHANT)
return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, tmp->GetPosition()) > USE_NPC_RANGE2)
return;
merchantid = tmp->CastToNPC()->MerchantType;
int action = 1;
if (merchantid == 0) {
auto outapp = new EQApplicationPacket(OP_ShopRequest, sizeof(Merchant_Click_Struct));
Merchant_Click_Struct* mco = (Merchant_Click_Struct*)outapp->pBuffer;
mco->npcid = mc->npcid;
mco->playerid = 0;
mco->command = 1; //open...
mco->rate = 1.0;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
if (tmp->IsEngaged()) {
this->MessageString(Chat::White, MERCHANT_BUSY);
action = 0;
}
if (GetFeigned() || IsInvisible())
{
Message(0, "You cannot use a merchant right now.");
action = 0;
}
int primaryfaction = tmp->CastToNPC()->GetPrimaryFaction();
int factionlvl = GetFactionLevel(CharacterID(), tmp->CastToNPC()->GetNPCTypeID(), GetRace(), GetClass(), GetDeity(), primaryfaction, tmp);
if (factionlvl >= 7) {
MerchantRejectMessage(tmp, primaryfaction);
action = 0;
}
if (tmp->Charmed())
action = 0;
// 1199 I don't have time for that now. etc
if (!tmp->CastToNPC()->IsMerchantOpen()) {
tmp->SayString(zone->random.Int(1199, 1202));
action = 0;
}
auto outapp = new EQApplicationPacket(OP_ShopRequest, sizeof(Merchant_Click_Struct));
Merchant_Click_Struct* mco = (Merchant_Click_Struct*)outapp->pBuffer;
mco->npcid = mc->npcid;
mco->playerid = 0;
mco->command = action; // Merchant command 0x01 = open
if (RuleB(Merchant, UsePriceMod)) {
mco->rate = 1 / ((RuleR(Merchant, BuyCostMod))*Client::CalcPriceMod(tmp, true)); // works
}
else
mco->rate = 1 / (RuleR(Merchant, BuyCostMod));
outapp->priority = 6;
QueuePacket(outapp);
safe_delete(outapp);
if (action == 1)
BulkSendMerchantInventory(merchantid, tmp->GetNPCTypeID());
return;
}
void Client::Handle_OP_Sneak(const EQApplicationPacket *app)
{
if (!HasSkill(EQ::skills::SkillSneak) && GetSkill(EQ::skills::SkillSneak) == 0) {
return; //You cannot sneak if you do not have sneak
}
if (!p_timers.Expired(&database, pTimerSneak, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerSneak, SneakReuseTime - 1);
bool was = sneaking;
if (sneaking) {
sneaking = false;
hidden = false;
improved_hidden = false;
auto outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct* sa_out = (SpawnAppearance_Struct*)outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x03;
sa_out->parameter = 0;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
}
else {
CheckIncreaseSkill(EQ::skills::SkillSneak, nullptr, 5);
}
float hidechance = ((GetSkill(EQ::skills::SkillSneak) / 300.0f) + .25) * 100;
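	// e.g. at Sneak skill 200 this gives ((200 / 300) + .25) * 100 ~= 91.7% chance to enter sneak.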
float random = zone->random.Real(0, 99);
if (!was && random < hidechance) {
sneaking = true;
}
auto outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct* sa_out = (SpawnAppearance_Struct*)outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x0F;
sa_out->parameter = sneaking;
QueuePacket(outapp);
safe_delete(outapp);
if (GetClass() == ROGUE) {
outapp = new EQApplicationPacket(OP_SimpleMessage, 12);
SimpleMessage_Struct *msg = (SimpleMessage_Struct *)outapp->pBuffer;
msg->color = 0x010E;
if (sneaking) {
msg->string_id = 347;
}
else {
msg->string_id = 348;
}
FastQueuePacket(&outapp);
}
return;
}
void Client::Handle_OP_SpawnAppearance(const EQApplicationPacket *app)
{
if (app->size != sizeof(SpawnAppearance_Struct)) {
std::cout << "Wrong size on OP_SpawnAppearance. Got: " << app->size << ", Expected: " << sizeof(SpawnAppearance_Struct) << std::endl;
return;
}
SpawnAppearance_Struct* sa = (SpawnAppearance_Struct*)app->pBuffer;
cheat_manager.ProcessSpawnApperance(sa->spawn_id, sa->type, sa->parameter);
if (sa->spawn_id != GetID())
return;
if (sa->type == AT_Invis) {
if (sa->parameter != 0)
{
if (!HasSkill(EQ::skills::SkillHide) && GetSkill(EQ::skills::SkillHide) == 0)
{
if (ClientVersion() < EQ::versions::ClientVersion::SoF)
{
auto hack_str = fmt::format("Player sent OP_SpawnAppearance with AT_Invis: {}", sa->parameter);
database.SetMQDetectionFlag(this->account_name, this->name, hack_str, zone->GetShortName());
}
}
return;
}
invisible = false;
hidden = false;
improved_hidden = false;
entity_list.QueueClients(this, app, true);
return;
}
else if (sa->type == AT_Anim) {
if (IsAIControlled())
return;
if (sa->parameter == ANIM_STAND) {
SetAppearance(eaStanding);
playeraction = 0;
SetFeigned(false);
BindWound(this, false, true);
camp_timer.Disable();
}
else if (sa->parameter == ANIM_SIT) {
SetAppearance(eaSitting);
playeraction = 1;
if (!UseBardSpellLogic())
InterruptSpell();
SetFeigned(false);
BindWound(this, false, true);
tmSitting = Timer::GetCurrentTime();
BuffFadeBySitModifier();
}
else if (sa->parameter == ANIM_CROUCH) {
if (!UseBardSpellLogic())
InterruptSpell();
SetAppearance(eaCrouching);
playeraction = 2;
SetFeigned(false);
}
else if (sa->parameter == ANIM_DEATH) { // feign death too
SetAppearance(eaDead);
playeraction = 3;
InterruptSpell();
}
else if (sa->parameter == ANIM_LOOT) {
SetAppearance(eaLooting);
playeraction = 4;
SetFeigned(false);
}
else {
LogError("Client [{}] :: unknown appearance [{}]", name, (int)sa->parameter);
return;
}
entity_list.QueueClients(this, app, true);
}
else if (sa->type == AT_Anon) {
if (!anon_toggle_timer.Check()) {
return;
}
// For Anon/Roleplay
if (sa->parameter == 1) { // Anon
m_pp.anon = 1;
}
else if ((sa->parameter == 2) || (sa->parameter == 3)) { // This is Roleplay, or anon+rp
m_pp.anon = 2;
}
else if (sa->parameter == 0) { // This is Non-Anon
m_pp.anon = 0;
}
else {
LogError("Client [{}] :: unknown Anon/Roleplay Switch [{}]", name, (int)sa->parameter);
return;
}
entity_list.QueueClients(this, app, true);
UpdateWho();
}
else if ((sa->type == AT_HP) && (dead == 0)) {
return;
}
else if (sa->type == AT_AFK) {
if (afk_toggle_timer.Check()) {
AFK = (sa->parameter == 1);
entity_list.QueueClients(this, app, true);
}
}
else if (sa->type == AT_Split) {
m_pp.autosplit = (sa->parameter == 1);
}
else if (sa->type == AT_Sneak) {
if (sneaking == 0)
return;
if (sa->parameter != 0)
{
if (!HasSkill(EQ::skills::SkillSneak))
{
auto hack_str = fmt::format("Player sent OP_SpawnAppearance with AT_Sneak: {}", sa->parameter);
database.SetMQDetectionFlag(this->account_name, this->name, hack_str, zone->GetShortName());
}
return;
}
sneaking = 0;
entity_list.QueueClients(this, app, true);
}
else if (sa->type == AT_Size)
{
auto hack_str = fmt::format("Player sent OP_SpawnAppearance with AT_Size: {}", sa->parameter);
database.SetMQDetectionFlag(this->account_name, this->name, hack_str, zone->GetShortName());
}
else if (sa->type == AT_Light) // client emitting light (lightstone, shiny shield)
{
//don't do anything with this
}
else if (sa->type == AT_Levitate)
{
// don't do anything with this, we tell the client when it's
// levitating, not the other way around
}
else if (sa->type == AT_ShowHelm)
{
if (helm_toggle_timer.Check()) {
m_pp.showhelm = (sa->parameter == 1);
entity_list.QueueClients(this, app, true);
}
}
else if (sa->type == AT_GroupConsent)
{
m_pp.groupAutoconsent = (sa->parameter == 1);
ConsentCorpses("Group", (sa->parameter != 1));
}
else if (sa->type == AT_RaidConsent)
{
m_pp.raidAutoconsent = (sa->parameter == 1);
ConsentCorpses("Raid", (sa->parameter != 1));
}
else if (sa->type == AT_GuildConsent)
{
m_pp.guildAutoconsent = (sa->parameter == 1);
ConsentCorpses("Guild", (sa->parameter != 1));
}
else {
std::cout << "Unknown SpawnAppearance type: 0x" << std::hex << std::setw(4) << std::setfill('0') << sa->type << std::dec
<< " value: 0x" << std::hex << std::setw(8) << std::setfill('0') << sa->parameter << std::dec << std::endl;
}
return;
}
void Client::Handle_OP_Split(const EQApplicationPacket *app)
{
if (app->size != sizeof(Split_Struct)) {
LogError("Wrong size: OP_Split, size=[{}], expected [{}]", app->size, sizeof(Split_Struct));
return;
}
// The client removes the money on its own, but we have to
// update our state anyway, and make sure they had enough to begin
// with.
Split_Struct *split = (Split_Struct *)app->pBuffer;
	//Per the note above, I'm not exactly sure what to do on error
//to notify the client of the error...
Group *group = nullptr;
Raid *raid = nullptr;
if (IsRaidGrouped())
raid = GetRaid();
else if (IsGrouped())
group = GetGroup();
// is there an actual error message for this?
if (raid == nullptr && group == nullptr) {
Message(Chat::Red, "You can not split money if you're not in a group.");
return;
}
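	// Everything is normalized to copper before the withdrawal: 1 silver = 10cp, 1 gold = 100cp,
	// 1 platinum = 1000cp; e.g. splitting 2pp 3gp comes out of the purse as 2300 copper.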
if (!TakeMoneyFromPP(static_cast<uint64>(split->copper) +
10 * static_cast<uint64>(split->silver) +
100 * static_cast<uint64>(split->gold) +
1000 * static_cast<uint64>(split->platinum))) {
Message(Chat::Red, "You do not have enough money to do that split.");
return;
}
if (raid)
raid->SplitMoney(raid->GetGroup(this), split->copper, split->silver, split->gold, split->platinum);
else if (group)
group->SplitMoney(split->copper, split->silver, split->gold, split->platinum);
return;
}
void Client::Handle_OP_Surname(const EQApplicationPacket *app)
{
if (app->size != sizeof(Surname_Struct))
{
LogDebug("Size mismatch in Surname expected [{}] got [{}]", sizeof(Surname_Struct), app->size);
return;
}
if (!p_timers.Expired(&database, pTimerSurnameChange, false) && !GetGM())
{
Message(Chat::Yellow, "You may only change surnames once every 7 days, your /surname is currently on cooldown.");
return;
}
if (GetLevel() < 20)
{
MessageString(Chat::Yellow, SURNAME_LEVEL);
return;
}
Surname_Struct* surname = (Surname_Struct*)app->pBuffer;
char *c = nullptr;
bool first = true;
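	// Normalize capitalization in place: e.g. "mCgEE" becomes "Mcgee" (first letter upper, rest lower).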
for (c = surname->lastname; *c; c++)
{
if (first)
{
*c = toupper(*c);
first = false;
}
else
{
*c = tolower(*c);
}
}
if (strlen(surname->lastname) >= 20) {
MessageString(Chat::Yellow, SURNAME_TOO_LONG);
return;
}
if (!database.CheckNameFilter(surname->lastname, true))
{
MessageString(Chat::Yellow, SURNAME_REJECTED);
return;
}
ChangeLastName(surname->lastname);
p_timers.Start(pTimerSurnameChange, 604800);
EQApplicationPacket* outapp = app->Copy();
outapp = app->Copy();
surname = (Surname_Struct*)outapp->pBuffer;
surname->unknown0064 = 1;
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_SwapSpell(const EQApplicationPacket *app)
{
if (app->size != sizeof(SwapSpell_Struct)) {
std::cout << "Wrong size on OP_SwapSpell. Got: " << app->size << ", Expected: " << sizeof(SwapSpell_Struct) << std::endl;
return;
}
const SwapSpell_Struct* swapspell = (const SwapSpell_Struct*)app->pBuffer;
int swapspelltemp;
const auto sbs = EQ::spells::DynamicLookup(ClientVersion(), GetGM())->SpellbookSize;
if (swapspell->from_slot < 0 || swapspell->from_slot >= sbs)
return;
if (swapspell->to_slot < 0 || swapspell->to_slot >= sbs)
return;
swapspelltemp = m_pp.spell_book[swapspell->from_slot];
if (swapspelltemp < 0) {
return;
}
m_pp.spell_book[swapspell->from_slot] = m_pp.spell_book[swapspell->to_slot];
m_pp.spell_book[swapspell->to_slot] = swapspelltemp;
/* Save Spell Swaps */
if (!database.SaveCharacterSpell(this->CharacterID(), m_pp.spell_book[swapspell->from_slot], swapspell->from_slot)) {
database.DeleteCharacterSpell(this->CharacterID(), m_pp.spell_book[swapspell->from_slot], swapspell->from_slot);
}
if (!database.SaveCharacterSpell(this->CharacterID(), swapspelltemp, swapspell->to_slot)) {
database.DeleteCharacterSpell(this->CharacterID(), swapspelltemp, swapspell->to_slot);
}
QueuePacket(app);
return;
}
void Client::Handle_OP_TargetCommand(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClientTarget_Struct)) {
LogError("OP size error: OP_TargetMouse expected:[{}] got:[{}]", sizeof(ClientTarget_Struct), app->size);
return;
}
if (GetTarget())
{
GetTarget()->IsTargeted(-1);
}
// Locate and cache new target
ClientTarget_Struct* ct = (ClientTarget_Struct*)app->pBuffer;
pClientSideTarget = ct->new_target;
if (!IsAIControlled())
{
Mob *nt = entity_list.GetMob(ct->new_target);
if (nt)
{
SetTarget(nt);
bool inspect_buffs = false;
// rank 1 gives you ability to see NPC buffs in target window (SoD+)
if (nt->IsNPC()) {
if (IsRaidGrouped()) {
Raid *raid = GetRaid();
if (raid) {
uint32 gid = raid->GetGroup(this);
if (gid < 12 && raid->GroupCount(gid) > 2)
inspect_buffs = raid->GetLeadershipAA(groupAAInspectBuffs, gid);
}
}
else {
Group *group = GetGroup();
if (group && group->GroupCount() > 2)
inspect_buffs = group->GetLeadershipAA(groupAAInspectBuffs);
}
}
if (GetGM() || RuleB(Spells, AlwaysSendTargetsBuffs) || nt == this || inspect_buffs || (nt->IsClient() && !nt->CastToClient()->GetPVP()) ||
(nt->IsPet() && nt->GetOwner() && nt->GetOwner()->IsClient() && !nt->GetOwner()->CastToClient()->GetPVP()) ||
#ifdef BOTS
(nt->IsBot() && nt->GetOwner() && nt->GetOwner()->IsClient() && !nt->GetOwner()->CastToClient()->GetPVP()) || // TODO: bot pets
#endif
(nt->IsMerc() && nt->GetOwner() && nt->GetOwner()->IsClient() && !nt->GetOwner()->CastToClient()->GetPVP()))
{
nt->SendBuffsToClient(this);
}
}
else
{
SetTarget(nullptr);
SetHoTT(0);
UpdateXTargetType(TargetsTarget, nullptr);
Group *g = GetGroup();
if (g && g->HasRole(this, RoleAssist))
g->SetGroupAssistTarget(0);
if (g && g->HasRole(this, RoleTank))
g->SetGroupTankTarget(0);
if (g && g->HasRole(this, RolePuller))
g->SetGroupPullerTarget(0);
return;
}
}
else
{
SetTarget(nullptr);
SetHoTT(0);
UpdateXTargetType(TargetsTarget, nullptr);
return;
}
// HoTT
if (GetTarget() && GetTarget()->GetTarget())
{
SetHoTT(GetTarget()->GetTarget()->GetID());
UpdateXTargetType(TargetsTarget, GetTarget()->GetTarget());
}
else
{
SetHoTT(0);
UpdateXTargetType(TargetsTarget, nullptr);
}
Group *g = GetGroup();
if (g && g->HasRole(this, RoleAssist))
g->SetGroupAssistTarget(GetTarget());
if (g && g->HasRole(this, RoleTank))
g->SetGroupTankTarget(GetTarget());
if (g && g->HasRole(this, RolePuller))
g->SetGroupPullerTarget(GetTarget());
// For /target, send reject or success packet
if (app->GetOpcode() == OP_TargetCommand) {
if (GetTarget() && !GetTarget()->CastToMob()->IsInvisible(this) && (DistanceSquared(m_Position, GetTarget()->GetPosition()) <= TARGETING_RANGE*TARGETING_RANGE || GetGM())) {
if (GetTarget()->GetBodyType() == BT_NoTarget2 || GetTarget()->GetBodyType() == BT_Special
|| GetTarget()->GetBodyType() == BT_NoTarget)
{
//Targeting something we shouldn't with /target
//but the client allows this without MQ so you don't flag it
auto outapp = new EQApplicationPacket(OP_TargetReject, sizeof(TargetReject_Struct));
outapp->pBuffer[0] = 0x2f;
outapp->pBuffer[1] = 0x01;
outapp->pBuffer[4] = 0x0d;
if (GetTarget())
{
SetTarget(nullptr);
}
QueuePacket(outapp);
safe_delete(outapp);
return;
}
QueuePacket(app);
GetTarget()->IsTargeted(1);
SendHPUpdate();
}
else
{
auto outapp = new EQApplicationPacket(OP_TargetReject, sizeof(TargetReject_Struct));
outapp->pBuffer[0] = 0x2f;
outapp->pBuffer[1] = 0x01;
outapp->pBuffer[4] = 0x0d;
if (GetTarget())
{
SetTarget(nullptr);
}
QueuePacket(outapp);
safe_delete(outapp);
}
}
else
{
if (GetTarget())
{
if (GetGM())
{
GetTarget()->IsTargeted(1);
return;
}
else if (RuleB(Character, AllowMQTarget))
{
GetTarget()->IsTargeted(1);
return;
}
else if (cheat_manager.GetExemptStatus(Assist)) {
GetTarget()->IsTargeted(1);
cheat_manager.SetExemptStatus(Assist, false);
return;
}
else if (GetTarget()->IsClient())
{
//make sure this client is in our raid/group
GetTarget()->IsTargeted(1);
return;
}
else if (GetTarget()->GetBodyType() == BT_NoTarget2 || GetTarget()->GetBodyType() == BT_Special
|| GetTarget()->GetBodyType() == BT_NoTarget)
{
auto hacker_str = fmt::format("{} attempting to target something untargetable, {} bodytype: {}",
GetName(), GetTarget()->GetName(), (int)GetTarget()->GetBodyType());
database.SetMQDetectionFlag(AccountName(), GetName(), hacker_str, zone->GetShortName());
SetTarget((Mob*)nullptr);
return;
}
else if (cheat_manager.GetExemptStatus(Port)) {
GetTarget()->IsTargeted(1);
return;
}
else if (cheat_manager.GetExemptStatus(Sense)) {
GetTarget()->IsTargeted(1);
cheat_manager.SetExemptStatus(Sense, false);
return;
}
else if (IsXTarget(GetTarget()))
{
GetTarget()->IsTargeted(1);
return;
}
else if (GetTarget()->IsPetOwnerClient())
{
GetTarget()->IsTargeted(1);
return;
}
else if (GetBindSightTarget())
{
if (DistanceSquared(GetBindSightTarget()->GetPosition(), GetTarget()->GetPosition()) > (zone->newzone_data.maxclip*zone->newzone_data.maxclip))
{
if (DistanceSquared(m_Position, GetTarget()->GetPosition()) > (zone->newzone_data.maxclip*zone->newzone_data.maxclip))
{
auto hacker_str = fmt::format(
"{} attempting to target something beyond the clip plane of {:.2f} "
"units, from ({:.2f}, {:.2f}, {:.2f}) to {} ({:.2f}, {:.2f}, "
"{:.2f})",
GetName(),
(zone->newzone_data.maxclip * zone->newzone_data.maxclip), GetX(),
GetY(), GetZ(), GetTarget()->GetName(), GetTarget()->GetX(),
GetTarget()->GetY(), GetTarget()->GetZ());
database.SetMQDetectionFlag(AccountName(), GetName(), hacker_str, zone->GetShortName());
SetTarget(nullptr);
return;
}
}
}
else if (DistanceSquared(m_Position, GetTarget()->GetPosition()) > (zone->newzone_data.maxclip*zone->newzone_data.maxclip))
{
auto hacker_str =
fmt::format("{} attempting to target something beyond the clip plane of {:.2f} "
"units, from ({:.2f}, {:.2f}, {:.2f}) to {} ({:.2f}, {:.2f}, {:.2f})",
GetName(), (zone->newzone_data.maxclip * zone->newzone_data.maxclip),
GetX(), GetY(), GetZ(), GetTarget()->GetName(), GetTarget()->GetX(),
GetTarget()->GetY(), GetTarget()->GetZ());
database.SetMQDetectionFlag(AccountName(), GetName(), hacker_str, zone->GetShortName());
SetTarget(nullptr);
return;
}
GetTarget()->IsTargeted(1);
}
}
return;
}
void Client::Handle_OP_TargetMouse(const EQApplicationPacket *app)
{
Handle_OP_TargetCommand(app);
}
void Client::Handle_OP_TaskHistoryRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(TaskHistoryRequest_Struct)) {
LogDebug("Size mismatch in OP_TaskHistoryRequest expected [{}] got [{}]", sizeof(TaskHistoryRequest_Struct), app->size);
DumpPacket(app);
return;
}
TaskHistoryRequest_Struct *ths = (TaskHistoryRequest_Struct*)app->pBuffer;
if (RuleB(TaskSystem, EnableTaskSystem) && task_state)
task_state->SendTaskHistory(this, ths->TaskIndex);
}
void Client::Handle_OP_Taunt(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClientTarget_Struct)) {
std::cout << "Wrong size on OP_Taunt. Got: " << app->size << ", Expected: " << sizeof(ClientTarget_Struct) << std::endl;
return;
}
if (!p_timers.Expired(&database, pTimerTaunt, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerTaunt, TauntReuseTime - 1);
if (GetTarget() == nullptr || !GetTarget()->IsNPC())
return;
if (!zone->CanDoCombat()) {
Message(Chat::Red, "You cannot taunt in a no combat zone.");
return;
}
Taunt(GetTarget()->CastToNPC(), false);
return;
}
void Client::Handle_OP_TestBuff(const EQApplicationPacket *app)
{
if (!RuleB(Character, EnableTestBuff)) {
return;
}
parse->EventPlayer(EVENT_TEST_BUFF, this, "", 0);
return;
}
void Client::Handle_OP_TGB(const EQApplicationPacket *app)
{
OPTGB(app);
return;
}
void Client::Handle_OP_Track(const EQApplicationPacket *app)
{
if (GetClass() != RANGER && GetClass() != DRUID && GetClass() != BARD)
return;
if (GetSkill(EQ::skills::SkillTracking) == 0)
SetSkill(EQ::skills::SkillTracking, 1);
else
CheckIncreaseSkill(EQ::skills::SkillTracking, nullptr, 15);
if (!entity_list.MakeTrackPacket(this))
LogError("Unable to generate OP_Track packet requested by client");
return;
}
void Client::Handle_OP_TrackTarget(const EQApplicationPacket *app)
{
int PlayerClass = GetClass();
if ((PlayerClass != RANGER) && (PlayerClass != DRUID) && (PlayerClass != BARD))
return;
if (app->size != sizeof(TrackTarget_Struct))
{
LogError("Invalid size for OP_TrackTarget: Expected: [{}], Got: [{}]",
sizeof(TrackTarget_Struct), app->size);
return;
}
TrackTarget_Struct *tts = (TrackTarget_Struct*)app->pBuffer;
TrackingID = tts->EntityID;
}
void Client::Handle_OP_TrackUnknown(const EQApplicationPacket *app)
{
// size 0 send right after OP_Track
return;
}
void Client::Handle_OP_TradeAcceptClick(const EQApplicationPacket *app)
{
Mob* with = trade->With();
trade->state = TradeAccepted;
if (with && with->IsClient()) {
//finish trade...
// Have both accepted?
Client* other = with->CastToClient();
other->QueuePacket(app);
if (other->trade->state == trade->state) {
other->trade->state = TradeCompleting;
trade->state = TradeCompleting;
if (CheckTradeLoreConflict(other) || other->CheckTradeLoreConflict(this)) {
MessageString(Chat::Red, TRADE_CANCEL_LORE);
other->MessageString(Chat::Red, TRADE_CANCEL_LORE);
this->FinishTrade(this);
other->FinishTrade(other);
other->trade->Reset();
trade->Reset();
}
else if (CheckTradeNonDroppable()) {
MessageString(Chat::Red, TRADE_HAS_BEEN_CANCELLED);
other->MessageString(Chat::Red, TRADE_HAS_BEEN_CANCELLED);
this->FinishTrade(this);
other->FinishTrade(other);
other->trade->Reset();
trade->Reset();
Message(Chat::Yellow, "Hacking activity detected in trade transaction.");
// TODO: query (this) as a hacker
}
else if (other->CheckTradeNonDroppable()) {
MessageString(Chat::Red, TRADE_HAS_BEEN_CANCELLED);
other->MessageString(Chat::Red, TRADE_HAS_BEEN_CANCELLED);
this->FinishTrade(this);
other->FinishTrade(other);
other->trade->Reset();
trade->Reset();
other->Message(Chat::Yellow, "Hacking activity detected in trade transaction.");
// TODO: query (other) as a hacker
}
else {
// Audit trade to database for both trade streams
other->trade->LogTrade();
trade->LogTrade();
// start QS code
if (RuleB(QueryServ, PlayerLogTrades)) {
QSPlayerLogTrade_Struct event_entry;
std::list<void*> event_details;
memset(&event_entry, 0, sizeof(QSPlayerLogTrade_Struct));
// Perform actual trade
this->FinishTrade(other, true, &event_entry, &event_details);
other->FinishTrade(this, false, &event_entry, &event_details);
event_entry._detail_count = event_details.size();
auto qs_pack = new ServerPacket(
ServerOP_QSPlayerLogTrades,
sizeof(QSPlayerLogTrade_Struct) +
(sizeof(QSTradeItems_Struct) * event_entry._detail_count));
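				// The QS packet is a fixed header with a flexible items[] tail, so the allocation above
				// sizes it as header + detail_count * per-item struct; the memcpy below fills only the header.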
QSPlayerLogTrade_Struct* qs_buf = (QSPlayerLogTrade_Struct*)qs_pack->pBuffer;
memcpy(qs_buf, &event_entry, sizeof(QSPlayerLogTrade_Struct));
int offset = 0;
for (auto iter = event_details.begin(); iter != event_details.end();
++iter, ++offset) {
QSTradeItems_Struct* detail = reinterpret_cast<QSTradeItems_Struct*>(*iter);
qs_buf->items[offset] = *detail;
safe_delete(detail);
}
event_details.clear();
if (worldserver.Connected())
worldserver.SendPacket(qs_pack);
safe_delete(qs_pack);
// end QS code
}
else {
this->FinishTrade(other);
other->FinishTrade(this);
}
other->trade->Reset();
trade->Reset();
}
// All done
auto outapp = new EQApplicationPacket(OP_FinishTrade, 0);
other->QueuePacket(outapp);
this->FastQueuePacket(&outapp);
}
}
// Trading with a Mob object that is not a Client.
else if (with) {
auto outapp = new EQApplicationPacket(OP_FinishTrade, 0);
QueuePacket(outapp);
safe_delete(outapp);
if (with->IsNPC()) {
// Audit trade to database for player trade stream
if (RuleB(QueryServ, PlayerLogHandins)) {
QSPlayerLogHandin_Struct event_entry;
std::list<void*> event_details;
memset(&event_entry, 0, sizeof(QSPlayerLogHandin_Struct));
FinishTrade(with->CastToNPC(), false, &event_entry, &event_details);
event_entry._detail_count = event_details.size();
auto qs_pack =
new ServerPacket(ServerOP_QSPlayerLogHandins,
sizeof(QSPlayerLogHandin_Struct) +
(sizeof(QSHandinItems_Struct) * event_entry._detail_count));
QSPlayerLogHandin_Struct* qs_buf = (QSPlayerLogHandin_Struct*)qs_pack->pBuffer;
memcpy(qs_buf, &event_entry, sizeof(QSPlayerLogHandin_Struct));
int offset = 0;
for (auto iter = event_details.begin(); iter != event_details.end(); ++iter, ++offset) {
QSHandinItems_Struct* detail = reinterpret_cast<QSHandinItems_Struct*>(*iter);
qs_buf->items[offset] = *detail;
safe_delete(detail);
}
event_details.clear();
if (worldserver.Connected())
worldserver.SendPacket(qs_pack);
safe_delete(qs_pack);
}
else {
FinishTrade(with->CastToNPC());
}
}
#ifdef BOTS
// TODO: Log Bot trades
else if (with->IsBot())
with->CastToBot()->FinishTrade(this, Bot::BotTradeClientNormal);
#endif
trade->Reset();
}
return;
}
void Client::Handle_OP_TradeBusy(const EQApplicationPacket *app)
{
if (app->size != sizeof(TradeBusy_Struct)) {
LogError("Wrong size: OP_TradeBusy, size=[{}], expected [{}]", app->size, sizeof(TradeBusy_Struct));
return;
}
// Trade request recipient is cancelling the trade due to being busy
// Trade requester gets message "I'm busy right now"
// Send busy message on to trade initiator if client
TradeBusy_Struct* msg = (TradeBusy_Struct*)app->pBuffer;
Mob* tradee = entity_list.GetMob(msg->to_mob_id);
if (tradee && tradee->IsClient()) {
tradee->CastToClient()->QueuePacket(app);
}
return;
}
void Client::Handle_OP_Trader(const EQApplicationPacket *app)
{
// Bazaar Trader:
//
// SoF sends 1 or more unhandled OP_Trader packets of size 96 when a trade has completed.
// I don't know what they are for (yet), but it doesn't seem to matter that we ignore them.
uint32 max_items = 80;
/*
if (GetClientVersion() >= EQClientRoF)
max_items = 200;
*/
//Show Items
if (app->size == sizeof(Trader_ShowItems_Struct))
{
Trader_ShowItems_Struct* sis = (Trader_ShowItems_Struct*)app->pBuffer;
switch (sis->Code)
{
case BazaarTrader_EndTraderMode: {
Trader_EndTrader();
LogTrading("Client::Handle_OP_Trader: End Trader Session");
break;
}
case BazaarTrader_EndTransaction: {
Client* c = entity_list.GetClientByID(sis->TraderID);
if (c)
{
c->WithCustomer(0);
LogTrading("Client::Handle_OP_Trader: End Transaction");
}
else
LogTrading("Client::Handle_OP_Trader: Null Client Pointer");
break;
}
case BazaarTrader_ShowItems: {
Trader_ShowItems();
LogTrading("Client::Handle_OP_Trader: Show Trader Items");
break;
}
default: {
LogTrading("Unhandled action code in OP_Trader ShowItems_Struct");
break;
}
}
}
else if (app->size == sizeof(ClickTrader_Struct))
{
if (Buyer) {
Trader_EndTrader();
Message(Chat::Red, "You cannot be a Trader and Buyer at the same time.");
return;
}
ClickTrader_Struct* ints = (ClickTrader_Struct*)app->pBuffer;
if (ints->Code == BazaarTrader_StartTraderMode)
{
GetItems_Struct* gis = GetTraderItems();
LogTrading("Client::Handle_OP_Trader: Start Trader Mode");
// Verify there are no NODROP or items with a zero price
bool TradeItemsValid = true;
for (uint32 i = 0; i < max_items; i++) {
if (gis->Items[i] == 0) break;
if (ints->ItemCost[i] == 0) {
Message(Chat::Red, "Item in Trader Satchel with no price. Unable to start trader mode");
TradeItemsValid = false;
break;
}
const EQ::ItemData *Item = database.GetItem(gis->Items[i]);
if (!Item) {
Message(Chat::Red, "Unexpected error. Unable to start trader mode");
TradeItemsValid = false;
break;
}
if (Item->NoDrop == 0) {
Message(Chat::Red, "NODROP Item in Trader Satchel. Unable to start trader mode");
TradeItemsValid = false;
break;
}
}
if (!TradeItemsValid) {
Trader_EndTrader();
return;
}
for (uint32 i = 0; i < max_items; i++) {
if (database.GetItem(gis->Items[i])) {
database.SaveTraderItem(this->CharacterID(), gis->Items[i], gis->SerialNumber[i],
gis->Charges[i], ints->ItemCost[i], i);
auto inst = FindTraderItemBySerialNumber(gis->SerialNumber[i]);
if (inst)
inst->SetPrice(ints->ItemCost[i]);
}
else {
					//return; //sony doesn't memset so assume done on first bad item
break;
}
}
safe_delete(gis);
this->Trader_StartTrader();
// This refreshes the Trader window to display the End Trader button
if (ClientVersion() >= EQ::versions::ClientVersion::RoF)
{
auto outapp = new EQApplicationPacket(OP_Trader, sizeof(TraderStatus_Struct));
TraderStatus_Struct* tss = (TraderStatus_Struct*)outapp->pBuffer;
tss->Code = BazaarTrader_StartTraderMode2;
QueuePacket(outapp);
safe_delete(outapp);
}
}
else {
LogTrading("Client::Handle_OP_Trader: Unknown TraderStruct code of: [{}]\n",
ints->Code);
LogError("Unknown TraderStruct code of: [{}]\n", ints->Code);
}
}
else if (app->size == sizeof(TraderStatus_Struct))
{
TraderStatus_Struct* tss = (TraderStatus_Struct*)app->pBuffer;
LogTrading("Client::Handle_OP_Trader: Trader Status Code: [{}]", tss->Code);
switch (tss->Code)
{
case BazaarTrader_EndTraderMode: {
Trader_EndTrader();
LogTrading("Client::Handle_OP_Trader: End Trader Session");
break;
}
case BazaarTrader_ShowItems: {
Trader_ShowItems();
LogTrading("Client::Handle_OP_Trader: Show Trader Items");
break;
}
default: {
LogTrading("Unhandled action code in OP_Trader ShowItems_Struct");
break;
}
}
}
else if (app->size == sizeof(TraderPriceUpdate_Struct))
{
LogTrading("Client::Handle_OP_Trader: Trader Price Update");
HandleTraderPriceUpdate(app);
}
else {
LogTrading("Unknown size for OP_Trader: [{}]\n", app->size);
LogError("Unknown size for OP_Trader: [{}]\n", app->size);
DumpPacket(app);
return;
}
return;
}
void Client::Handle_OP_TraderBuy(const EQApplicationPacket *app)
{
// Bazaar Trader:
//
// Client has elected to buy an item from a Trader
//
if (app->size != sizeof(TraderBuy_Struct)) {
LogError("Wrong size: OP_TraderBuy, size=[{}], expected [{}]", app->size, sizeof(TraderBuy_Struct));
return;
}
TraderBuy_Struct* tbs = (TraderBuy_Struct*)app->pBuffer;
if (Client* Trader = entity_list.GetClientByID(tbs->TraderID)) {
BuyTraderItem(tbs, Trader, app);
LogTrading("Client::Handle_OP_TraderBuy: Buy Trader Item ");
}
else {
LogTrading("Client::Handle_OP_TraderBuy: Null Client Pointer");
}
return;
}
void Client::Handle_OP_TradeRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(TradeRequest_Struct)) {
LogError("Wrong size: OP_TradeRequest, size=[{}], expected [{}]", app->size, sizeof(TradeRequest_Struct));
return;
}
// Client requesting a trade session from an npc/client
// Trade session not started until OP_TradeRequestAck is sent
TradeRequest_Struct* msg = (TradeRequest_Struct*)app->pBuffer;
Mob* tradee = entity_list.GetMob(msg->to_mob_id);
	// If the tradee is an untargetable mob - ignore
// Helps in cases where servers use invisible_man, body type 11 for quests
// and the client opens a trade by mistake.
if (tradee && (tradee->GetBodyType() == 11)) {
return;
}
CommonBreakInvisible();
// Pass trade request on to recipient
if (tradee && tradee->IsClient()) {
tradee->CastToClient()->QueuePacket(app);
}
#ifndef BOTS
else if (tradee && tradee->IsNPC()) {
#else
else if (tradee && (tradee->IsNPC() || tradee->IsBot())) {
#endif
if (!tradee->IsEngaged()) {
trade->Start(msg->to_mob_id);
EQApplicationPacket *outapp = new EQApplicationPacket(OP_TradeRequestAck, sizeof(TradeRequest_Struct));
TradeRequest_Struct *acc = (TradeRequest_Struct *) outapp->pBuffer;
acc->from_mob_id = msg->to_mob_id;
acc->to_mob_id = msg->from_mob_id;
FastQueuePacket(&outapp);
safe_delete(outapp);
}
}
return;
}
void Client::Handle_OP_TradeRequestAck(const EQApplicationPacket *app)
{
if (app->size != sizeof(TradeRequest_Struct)) {
LogError("Wrong size: OP_TradeRequestAck, size=[{}], expected [{}]", app->size, sizeof(TradeRequest_Struct));
return;
}
// Trade request recipient is acknowledging they are able to trade
// After this, the trade session has officially started
// Send ack on to trade initiator if client
TradeRequest_Struct* msg = (TradeRequest_Struct*)app->pBuffer;
Mob* tradee = entity_list.GetMob(msg->to_mob_id);
if (tradee && tradee->IsClient()) {
trade->Start(msg->to_mob_id);
tradee->CastToClient()->QueuePacket(app);
}
return;
}
void Client::Handle_OP_TraderShop(const EQApplicationPacket *app)
{
// Bazaar Trader:
if (app->size == sizeof(TraderClick_Struct))
{
TraderClick_Struct* tcs = (TraderClick_Struct*)app->pBuffer;
LogTrading("Handle_OP_TraderShop: TraderClick_Struct TraderID [{}], Code [{}], Unknown008 [{}], Approval [{}]",
tcs->TraderID, tcs->Code, tcs->Unknown008, tcs->Approval);
if (tcs->Code == BazaarWelcome)
{
LogTrading("Client::Handle_OP_TraderShop: Sent Bazaar Welcome Info");
SendBazaarWelcome();
}
else
{
// This is when a potential purchaser right clicks on this client who is in Trader mode to
// browse their goods.
auto outapp = new EQApplicationPacket(OP_TraderShop, sizeof(TraderClick_Struct));
TraderClick_Struct* outtcs = (TraderClick_Struct*)outapp->pBuffer;
Client* Trader = entity_list.GetClientByID(tcs->TraderID);
if (Trader)
{
outtcs->Approval = Trader->WithCustomer(GetID());
LogTrading("Client::Handle_OP_TraderShop: Shop Request ([{}]) to ([{}]) with Approval: [{}]", GetCleanName(), Trader->GetCleanName(), outtcs->Approval);
}
else {
LogTrading("Client::Handle_OP_TraderShop: entity_list.GetClientByID(tcs->traderid)"
" returned a nullptr pointer");
safe_delete(outapp);
return;
}
outtcs->TraderID = tcs->TraderID;
outtcs->Unknown008 = 0x3f800000;
QueuePacket(outapp);
if (outtcs->Approval) {
this->BulkSendTraderInventory(Trader->CharacterID());
Trader->Trader_CustomerBrowsing(this);
TraderID = tcs->TraderID;
LogTrading("Client::Handle_OP_TraderShop: Trader Inventory Sent");
}
else
{
MessageString(Chat::Yellow, TRADER_BUSY);
LogTrading("Client::Handle_OP_TraderShop: Trader Busy");
}
safe_delete(outapp);
return;
}
}
else if (app->size == sizeof(BazaarWelcome_Struct))
{
// RoF+
// Client requested Bazaar Welcome Info (Trader and Item Total Counts)
SendBazaarWelcome();
LogTrading("Client::Handle_OP_TraderShop: Sent Bazaar Welcome Info");
}
else if (app->size == sizeof(TraderBuy_Struct))
{
// RoF+
// Customer has purchased an item from the Trader
TraderBuy_Struct* tbs = (TraderBuy_Struct*)app->pBuffer;
if (Client* Trader = entity_list.GetClientByID(tbs->TraderID))
{
BuyTraderItem(tbs, Trader, app);
LogTrading("Handle_OP_TraderShop: Buy Action [{}], Price [{}], Trader [{}], ItemID [{}], Quantity [{}], ItemName, [{}]",
tbs->Action, tbs->Price, tbs->TraderID, tbs->ItemID, tbs->Quantity, tbs->ItemName);
}
else
{
LogTrading("OP_TraderShop: Null Client Pointer");
}
}
else if (app->size == 4)
{
// RoF+
// Customer has closed the trade window
uint32 Command = *((uint32 *)app->pBuffer);
if (Command == 4)
{
Client* c = entity_list.GetClientByID(TraderID);
TraderID = 0;
if (c)
{
c->WithCustomer(0);
LogTrading("Client::Handle_OP_Trader: End Transaction - Code [{}]", Command);
}
else
{
LogTrading("Client::Handle_OP_Trader: Null Client Pointer for Trader - Code [{}]", Command);
}
EQApplicationPacket empty(OP_ShopEndConfirm);
QueuePacket(&empty);
}
else
{
LogTrading("Client::Handle_OP_Trader: Unhandled Code [{}]", Command);
}
}
else
{
LogTrading("Unknown size for OP_TraderShop: [{}]\n", app->size);
LogError("Unknown size for OP_TraderShop: [{}]\n", app->size);
DumpPacket(app);
return;
}
}
void Client::Handle_OP_TradeSkillCombine(const EQApplicationPacket *app)
{
if (app->size != sizeof(NewCombine_Struct)) {
LogError("Invalid size for NewCombine_Struct: Expected: [{}], Got: [{}]",
sizeof(NewCombine_Struct), app->size);
return;
}
/*if (m_tradeskill_object == nullptr) {
Message(Chat::Red, "Error: Server is not aware of the tradeskill container you are attempting to use");
return;
}*/
//fixed this to work for non-world objects
// Delegate to tradeskill object to perform combine
NewCombine_Struct* in_combine = (NewCombine_Struct*)app->pBuffer;
Object::HandleCombine(this, in_combine, m_tradeskill_object);
return;
}
void Client::Handle_OP_Translocate(const EQApplicationPacket *app)
{
if (app->size != sizeof(Translocate_Struct)) {
LogDebug("Size mismatch in OP_Translocate expected [{}] got [{}]", sizeof(Translocate_Struct), app->size);
DumpPacket(app);
return;
}
Translocate_Struct *its = (Translocate_Struct*)app->pBuffer;
if (!PendingTranslocate)
return;
if ((RuleI(Spells, TranslocateTimeLimit) > 0) && (time(nullptr) > (TranslocateTime + RuleI(Spells, TranslocateTimeLimit)))) {
Message(Chat::Red, "You did not accept the Translocate within the required time limit.");
PendingTranslocate = false;
return;
}
if (its->Complete == 1) {
int SpellID = PendingTranslocateData.spell_id;
int i = parse->EventSpell(EVENT_SPELL_EFFECT_TRANSLOCATE_COMPLETE, nullptr, this, SpellID, 0);
if (i == 0)
{
// If the spell has a translocate to bind effect, AND we are already in the zone the client
// is bound in, use the GoToBind method. If we send OP_Translocate in this case, the client moves itself
// to the bind coords it has from the PlayerProfile, but with the X and Y reversed. I suspect they are
// reversed in the pp, and since spells like Gate are handled serverside, this has not mattered before.
if (((SpellID == 1422) || (SpellID == 1334) || (SpellID == 3243)) &&
(zone->GetZoneID() == PendingTranslocateData.zone_id &&
zone->GetInstanceID() == PendingTranslocateData.instance_id))
{
PendingTranslocate = false;
GoToBind();
return;
}
// Previously the packet was sent back to let the client initiate the zone itself,
// but that could be abused, so go through the proper channels instead.
MovePC(PendingTranslocateData.zone_id, PendingTranslocateData.instance_id,
PendingTranslocateData.x, PendingTranslocateData.y,
PendingTranslocateData.z, PendingTranslocateData.heading, 0, ZoneSolicited);
}
}
PendingTranslocate = false;
}
void Client::Handle_OP_TributeItem(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeItem of length [{}]", app->size);
//player donates an item...
if (app->size != sizeof(TributeItem_Struct))
printf("Error in OP_TributeItem. Expected size of: %zu, but got: %i\n", sizeof(StartTribute_Struct), app->size);
else {
TributeItem_Struct* t = (TributeItem_Struct*)app->pBuffer;
tribute_master_id = t->tribute_master_id;
//make sure they are dealing with a valid tribute master
Mob* tribmast = entity_list.GetMob(t->tribute_master_id);
if (!tribmast || !tribmast->IsNPC() || tribmast->GetClass() != TRIBUTE_MASTER)
return;
if (DistanceSquared(m_Position, tribmast->GetPosition()) > USE_NPC_RANGE2)
return;
t->tribute_points = TributeItem(t->slot, t->quantity);
LogTribute("Sending tribute item reply with [{}] points", t->tribute_points);
QueuePacket(app);
}
return;
}
void Client::Handle_OP_TributeMoney(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeMoney of length [{}]", app->size);
//player donates money
if (app->size != sizeof(TributeMoney_Struct))
printf("Error in OP_TributeMoney. Expected size of: %zu, but got: %i\n", sizeof(StartTribute_Struct), app->size);
else {
TributeMoney_Struct* t = (TributeMoney_Struct*)app->pBuffer;
tribute_master_id = t->tribute_master_id;
//make sure they are dealing with a valid tribute master
Mob* tribmast = entity_list.GetMob(t->tribute_master_id);
if (!tribmast || !tribmast->IsNPC() || tribmast->GetClass() != TRIBUTE_MASTER)
return;
if (DistanceSquared(m_Position, tribmast->GetPosition()) > USE_NPC_RANGE2)
return;
t->tribute_points = TributeMoney(t->platinum);
LogTribute("Sending tribute money reply with [{}] points", t->tribute_points);
QueuePacket(app);
}
return;
}
void Client::Handle_OP_TributeNPC(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeNPC of length [{}]", app->size);
return;
}
void Client::Handle_OP_TributeToggle(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeToggle of length [{}]", app->size);
if (app->size != sizeof(uint32))
LogError("Invalid size on OP_TributeToggle packet");
else {
uint32 *val = (uint32 *)app->pBuffer;
ToggleTribute(*val ? true : false);
}
return;
}
void Client::Handle_OP_TributeUpdate(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeUpdate of length [{}]", app->size);
//sent when the client changes their tribute settings...
if (app->size != sizeof(TributeInfo_Struct))
LogError("Invalid size on OP_TributeUpdate packet");
else {
TributeInfo_Struct *t = (TributeInfo_Struct *)app->pBuffer;
ChangeTributeSettings(t);
}
return;
}
void Client::Handle_OP_VetClaimRequest(const EQApplicationPacket *app)
{
if (app->size < sizeof(VeteranClaim)) {
LogDebug("OP_VetClaimRequest size lower than expected: got [{}] expected at least [{}]", app->size, sizeof(VeteranClaim));
DumpPacket(app);
return;
}
VeteranClaim *vcr = (VeteranClaim *)app->pBuffer;
if (vcr->claim_id == 0xFFFFFFFF) { // request update packet
SendRewards();
return;
}
// try to claim something!
auto vetapp = new EQApplicationPacket(OP_VetClaimReply, sizeof(VeteranClaim));
VeteranClaim *cr = (VeteranClaim *)vetapp->pBuffer;
strn0cpy(cr->name, GetName(), sizeof(cr->name));
cr->claim_id = vcr->claim_id;
if (!TryReward(vcr->claim_id))
cr->action = 1;
else
cr->action = 0;
FastQueuePacket(&vetapp);
}
void Client::Handle_OP_VoiceMacroIn(const EQApplicationPacket *app)
{
if (app->size != sizeof(VoiceMacroIn_Struct)) {
LogDebug("Size mismatch in OP_VoiceMacroIn expected [{}] got [{}]", sizeof(VoiceMacroIn_Struct), app->size);
DumpPacket(app);
return;
}
if (!RuleB(Chat, EnableVoiceMacros)) return;
VoiceMacroIn_Struct* vmi = (VoiceMacroIn_Struct*)app->pBuffer;
VoiceMacroReceived(vmi->Type, vmi->Target, vmi->MacroNumber);
}
void Client::Handle_OP_UpdateAura(const EQApplicationPacket *app)
{
if (app->size != sizeof(AuraDestory_Struct)) {
LogDebug("Size mismatch in OP_UpdateAura expected [{}] got [{}]", sizeof(AuraDestory_Struct), app->size);
return;
}
// client only sends this for removing
auto aura = (AuraDestory_Struct *)app->pBuffer;
if (aura->action != 1)
return; // could log I guess, but should only ever get this action
RemoveAura(aura->entity_id);
QueuePacket(app); // if we don't resend this, the client gets confused
return;
}
void Client::Handle_OP_WearChange(const EQApplicationPacket *app)
{
if (app->size != sizeof(WearChange_Struct)) {
std::cout << "Wrong size: OP_WearChange, size=" << app->size << ", expected " << sizeof(WearChange_Struct) << std::endl;
return;
}
WearChange_Struct* wc = (WearChange_Struct*)app->pBuffer;
if (wc->spawn_id != GetID())
return;
// Hero Forge ID needs to be fixed here as RoF2 appears to send an incorrect value.
if (wc->hero_forge_model != 0 && wc->wear_slot_id >= 0 && wc->wear_slot_id < EQ::textures::weaponPrimary)
wc->hero_forge_model = GetHerosForgeModel(wc->wear_slot_id);
// we could maybe ignore this and just send our own from moveitem
entity_list.QueueClients(this, app, true);
}
void Client::Handle_OP_WhoAllRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(Who_All_Struct)) {
std::cout << "Wrong size on OP_WhoAll. Got: " << app->size << ", Expected: " << sizeof(Who_All_Struct) << std::endl;
return;
}
Who_All_Struct* whoall = (Who_All_Struct*)app->pBuffer;
if (whoall->type == 0) // SoF only, for regular /who
entity_list.ZoneWho(this, whoall);
else
WhoAll(whoall);
return;
}
void Client::Handle_OP_XTargetAutoAddHaters(const EQApplicationPacket *app)
{
if (app->size != 1)
{
LogDebug("Size mismatch in OP_XTargetAutoAddHaters, expected 1, got [{}]", app->size);
DumpPacket(app);
return;
}
XTargetAutoAddHaters = app->ReadUInt8(0);
SetDirtyAutoHaters();
}
void Client::Handle_OP_XTargetOpen(const EQApplicationPacket *app)
{
if (app->size != 4) {
LogDebug("Size mismatch in OP_XTargetOpen, expected 1, got [{}]", app->size);
DumpPacket(app);
return;
}
auto outapp = new EQApplicationPacket(OP_XTargetOpenResponse, 0);
FastQueuePacket(&outapp);
}
void Client::Handle_OP_XTargetRequest(const EQApplicationPacket *app)
{
if (app->size < 12)
{
LogDebug("Size mismatch in OP_XTargetRequest, expected at least 12, got [{}]", app->size);
DumpPacket(app);
return;
}
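// packet layout: uint32 at offset 0 (always 1), uint32 slot at offset 4, uint32 XTargetType at offset 8, then a name string at offset 12 for the name-based types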
uint32 Unknown000 = app->ReadUInt32(0);
if (Unknown000 != 1)
return;
uint32 Slot = app->ReadUInt32(4);
if (Slot >= XTARGET_HARDCAP)
return;
XTargetType Type = (XTargetType)app->ReadUInt32(8);
XTargets[Slot].Type = Type;
XTargets[Slot].ID = 0;
XTargets[Slot].Name[0] = 0;
switch (Type)
{
case Empty:
case Auto:
{
break;
}
case CurrentTargetPC:
{
char Name[65];
app->ReadString(Name, 12, 64);
Client *c = entity_list.GetClientByName(Name);
if (c)
{
XTargets[Slot].ID = c->GetID();
strncpy(XTargets[Slot].Name, c->GetName(), 64);
}
else
{
strncpy(XTargets[Slot].Name, Name, 64);
}
SendXTargetPacket(Slot, c);
break;
}
case CurrentTargetNPC:
{
char Name[65];
app->ReadString(Name, 12, 64);
Mob *m = entity_list.GetMob(Name);
if (m)
{
XTargets[Slot].ID = m->GetID();
SendXTargetPacket(Slot, m);
break;
}
}
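// note: when the mob is not found above, control falls through into the TargetsTarget case below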
case TargetsTarget:
{
if (GetTarget())
UpdateXTargetType(TargetsTarget, GetTarget()->GetTarget());
else
UpdateXTargetType(TargetsTarget, nullptr);
break;
}
case GroupTank:
{
Group *g = GetGroup();
if (g)
{
Client *c = entity_list.GetClientByName(g->GetMainTankName());
if (c)
{
XTargets[Slot].ID = c->GetID();
strncpy(XTargets[Slot].Name, c->GetName(), 64);
}
else
{
strncpy(XTargets[Slot].Name, g->GetMainTankName(), 64);
}
SendXTargetPacket(Slot, c);
}
break;
}
case GroupTankTarget:
{
Group *g = GetGroup();
if (g)
g->NotifyTankTarget(this);
break;
}
case GroupAssist:
{
Group *g = GetGroup();
if (g)
{
Client *c = entity_list.GetClientByName(g->GetMainAssistName());
if (c)
{
XTargets[Slot].ID = c->GetID();
strncpy(XTargets[Slot].Name, c->GetName(), 64);
}
else
{
strncpy(XTargets[Slot].Name, g->GetMainAssistName(), 64);
}
SendXTargetPacket(Slot, c);
}
break;
}
case GroupAssistTarget:
{
Group *g = GetGroup();
if (g)
g->NotifyAssistTarget(this);
break;
}
case Puller:
{
Group *g = GetGroup();
if (g)
{
Client *c = entity_list.GetClientByName(g->GetPullerName());
if (c)
{
XTargets[Slot].ID = c->GetID();
strncpy(XTargets[Slot].Name, c->GetName(), 64);
}
else
{
strncpy(XTargets[Slot].Name, g->GetPullerName(), 64);
}
SendXTargetPacket(Slot, c);
}
break;
}
case PullerTarget:
{
Group *g = GetGroup();
if (g)
g->NotifyPullerTarget(this);
break;
}
case GroupMarkTarget1:
case GroupMarkTarget2:
case GroupMarkTarget3:
{
Group *g = GetGroup();
if (g)
g->SendMarkedNPCsToMember(this);
break;
}
case RaidAssist1:
case RaidAssist2:
case RaidAssist3:
case RaidAssist1Target:
case RaidAssist2Target:
case RaidAssist3Target:
case RaidMarkTarget1:
case RaidMarkTarget2:
case RaidMarkTarget3:
{
// Not implemented yet.
break;
}
case MyPet:
{
Mob *m = GetPet();
if (m)
{
XTargets[Slot].ID = m->GetID();
SendXTargetPacket(Slot, m);
}
break;
}
case MyPetTarget:
{
Mob *m = GetPet();
if (m)
m = m->GetTarget();
if (m)
{
XTargets[Slot].ID = m->GetID();
SendXTargetPacket(Slot, m);
}
break;
}
default:
LogDebug("Unhandled XTarget Type [{}]", Type);
break;
}
}
void Client::Handle_OP_YellForHelp(const EQApplicationPacket *app)
{
auto outapp = new EQApplicationPacket(OP_YellForHelp, 4);
*(uint32 *)outapp->pBuffer = GetID();
entity_list.QueueCloseClients(this, outapp, true, 100.0);
safe_delete(outapp);
return;
}
void Client::Handle_OP_ResetAA(const EQApplicationPacket *app)
{
if (Admin() >= 50) {
Message(0, "Resetting AA points.");
ResetAA();
}
return;
}
void Client::Handle_OP_MovementHistoryList(const EQApplicationPacket *app)
{
cheat_manager.ProcessMovementHistory(app);
}
void Client::Handle_OP_UnderWorld(const EQApplicationPacket *app)
{
if (app->size != sizeof(UnderWorld)) {
LogDebug("Size mismatch in OP_UnderWorld, expected [{}], got [{}]", sizeof(UnderWorld), app->size);
DumpPacket(app);
return;
}
UnderWorld *m_UnderWorld = (UnderWorld *) app->pBuffer;
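// dist is the client's vertical distance from the zone's underworld plane (the x and y components cancel out)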
auto dist = Distance(
glm::vec3(m_UnderWorld->x, m_UnderWorld->y, zone->newzone_data.underworld),
glm::vec3(m_UnderWorld->x, m_UnderWorld->y, m_UnderWorld->z));
cheat_manager.MovementCheck(glm::vec3(m_UnderWorld->x, m_UnderWorld->y, m_UnderWorld->z));
if (m_UnderWorld->spawn_id == GetID() && dist <= 5.0f && zone->newzone_data.underworld_teleport_index != 0) {
cheat_manager.SetExemptStatus(Port, true);
}
}
void Client::Handle_OP_SharedTaskRemovePlayer(const EQApplicationPacket *app)
{
if (app->size != sizeof(SharedTaskRemovePlayer_Struct)) {
LogPacketClientServer(
"Wrong size on Handle_OP_SharedTaskRemovePlayer | got [{}] expected [{}]",
app->size,
sizeof(SharedTaskRemovePlayer_Struct)
);
return;
}
auto *r = (SharedTaskRemovePlayer_Struct *) app->pBuffer;
LogTasks(
"[Handle_OP_SharedTaskRemovePlayer] field1 [{}] field2 [{}] player_name [{}]",
r->field1,
r->field2,
r->player_name
);
// live no-ops this command if not in a shared task
if (GetTaskState()->HasActiveSharedTask()) {
// struct
auto p = new ServerPacket(
ServerOP_SharedTaskRemovePlayer,
sizeof(ServerSharedTaskRemovePlayer_Struct)
);
auto *rp = (ServerSharedTaskRemovePlayer_Struct *) p->pBuffer;
// fill
rp->source_character_id = CharacterID();
rp->task_id = GetTaskState()->GetActiveSharedTask().task_id;
strn0cpy(rp->player_name, r->player_name, sizeof(rp->player_name));
LogTasks(
"[Handle_OP_SharedTaskRemovePlayer] source_character_id [{}] task_id [{}] player_name [{}]",
rp->source_character_id,
rp->task_id,
rp->player_name
);
// send
worldserver.SendPacket(p);
safe_delete(p);
}
}
void Client::Handle_OP_SharedTaskAddPlayer(const EQApplicationPacket *app)
{
if (app->size != sizeof(SharedTaskAddPlayer_Struct)) {
LogPacketClientServer(
"Wrong size on Handle_OP_SharedTaskAddPlayer | got [{}] expected [{}]",
app->size,
sizeof(SharedTaskAddPlayer_Struct)
);
return;
}
auto *r = (SharedTaskAddPlayer_Struct *) app->pBuffer;
LogTasks(
"[SharedTaskAddPlayer_Struct] field1 [{}] field2 [{}] player_name [{}]",
r->field1,
r->field2,
r->player_name
);
if (!GetTaskState()->HasActiveSharedTask()) {
// this message is generated client-side in newer clients
Message(Chat::System, SharedTaskMessage::GetEQStr(SharedTaskMessage::COULD_NOT_USE_COMMAND));
}
else {
// struct
auto p = new ServerPacket(
ServerOP_SharedTaskAddPlayer,
sizeof(ServerSharedTaskAddPlayer_Struct)
);
auto *rp = (ServerSharedTaskAddPlayer_Struct *) p->pBuffer;
// fill
rp->source_character_id = CharacterID();
rp->task_id = GetTaskState()->GetActiveSharedTask().task_id;
strn0cpy(rp->player_name, r->player_name, sizeof(rp->player_name));
LogTasks(
"[Handle_OP_SharedTaskRemovePlayer] source_character_id [{}] task_id [{}] player_name [{}]",
rp->source_character_id,
rp->task_id,
rp->player_name
);
// send
worldserver.SendPacket(p);
safe_delete(p);
}
}
void Client::Handle_OP_SharedTaskMakeLeader(const EQApplicationPacket *app)
{
if (app->size != sizeof(SharedTaskMakeLeader_Struct)) {
LogPacketClientServer(
"Wrong size on Handle_OP_SharedTaskMakeLeader | got [{}] expected [{}]",
app->size,
sizeof(SharedTaskMakeLeader_Struct)
);
return;
}
auto *r = (SharedTaskMakeLeader_Struct *) app->pBuffer;
LogTasks(
"[SharedTaskMakeLeader_Struct] field1 [{}] field2 [{}] player_name [{}]",
r->field1,
r->field2,
r->player_name
);
// live no-ops this command if not in a shared task
if (GetTaskState()->HasActiveSharedTask()) {
// struct
auto p = new ServerPacket(
ServerOP_SharedTaskMakeLeader,
sizeof(ServerSharedTaskMakeLeader_Struct)
);
auto *rp = (ServerSharedTaskMakeLeader_Struct *) p->pBuffer;
// fill
rp->source_character_id = CharacterID();
rp->task_id = GetTaskState()->GetActiveSharedTask().task_id;
strn0cpy(rp->player_name, r->player_name, sizeof(rp->player_name));
LogTasks(
"[Handle_OP_SharedTaskRemovePlayer] source_character_id [{}] task_id [{}] player_name [{}]",
rp->source_character_id,
rp->task_id,
rp->player_name
);
// send
worldserver.SendPacket(p);
safe_delete(p);
}
}
void Client::Handle_OP_SharedTaskInviteResponse(const EQApplicationPacket *app)
{
if (app->size != sizeof(SharedTaskInviteResponse_Struct)) {
LogPacketClientServer(
"Wrong size on SharedTaskInviteResponse | got [{}] expected [{}]",
app->size,
sizeof(SharedTaskInviteResponse_Struct)
);
return;
}
auto *r = (SharedTaskInviteResponse_Struct *) app->pBuffer;
LogTasks(
"[SharedTaskInviteResponse] unknown00 [{}] invite_id [{}] accepted [{}]",
r->unknown00,
r->invite_id,
r->accepted
);
// struct
auto p = new ServerPacket(
ServerOP_SharedTaskInviteAcceptedPlayer,
sizeof(ServerSharedTaskInviteAccepted_Struct)
);
auto *c = (ServerSharedTaskInviteAccepted_Struct *) p->pBuffer;
// fill
c->source_character_id = CharacterID();
c->shared_task_id = r->invite_id;
c->accepted = r->accepted;
strn0cpy(c->player_name, GetName(), sizeof(c->player_name));
LogTasks(
"[ServerOP_SharedTaskInviteAcceptedPlayer] source_character_id [{}] shared_task_id [{}]",
c->source_character_id,
c->shared_task_id
);
// send
worldserver.SendPacket(p);
safe_delete(p);
}
void Client::Handle_OP_SharedTaskAccept(const EQApplicationPacket* app)
{
// validate the payload size before touching the buffer, matching the other shared task handlers
if (app->size != sizeof(SharedTaskAccept_Struct)) {
LogPacketClientServer(
"Wrong size on Handle_OP_SharedTaskAccept | got [{}] expected [{}]",
app->size,
sizeof(SharedTaskAccept_Struct)
);
return;
}
auto buf = reinterpret_cast<SharedTaskAccept_Struct*>(app->pBuffer);
LogTasksDetail(
"[OP_SharedTaskAccept] unknown00 [{}] unknown04 [{}] npc_entity_id [{}] task_id [{}]",
buf->unknown00,
buf->unknown04,
buf->npc_entity_id,
buf->task_id
);
if (buf->task_id > 0 && RuleB(TaskSystem, EnableTaskSystem) && task_state) {
task_state->AcceptNewTask(this, buf->task_id, buf->npc_entity_id, std::time(nullptr));
}
}
void Client::Handle_OP_SharedTaskQuit(const EQApplicationPacket* app)
{
if (GetTaskState()->HasActiveSharedTask())
{
CancelTask(TASKSLOTSHAREDTASK, TaskType::Shared);
}
}
void Client::Handle_OP_TaskTimers(const EQApplicationPacket* app)
{
GetTaskState()->ListTaskTimers(this);
}
void Client::Handle_OP_SharedTaskPlayerList(const EQApplicationPacket* app)
{
if (GetTaskState()->HasActiveSharedTask())
{
uint32_t size = sizeof(ServerSharedTaskPlayerList_Struct);
auto pack = std::make_unique<ServerPacket>(ServerOP_SharedTaskPlayerList, size);
auto buf = reinterpret_cast<ServerSharedTaskPlayerList_Struct*>(pack->pBuffer);
buf->source_character_id = CharacterID();
buf->task_id = GetTaskState()->GetActiveSharedTask().task_id;
worldserver.SendPacket(pack.get());
}
}
int64 Client::GetSharedTaskId() const
{
return m_shared_task_id;
}
void Client::SetSharedTaskId(int64 shared_task_id)
{
m_shared_task_id = shared_task_id;
}
| 1 | 11,022 | Shouldn't need this-> here. | EQEmu-Server | cpp |
@@ -2717,7 +2717,7 @@ bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeI
player->sendTradeItemRequest(player->getName(), tradeItem, true);
if (tradePartner->tradeState == TRADE_NONE) {
- tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("{:s} wants to trade with you.", player->getName()));
+ tradePartner->sendTextMessage(MESSAGE_TRADE, fmt::format("{:s} wants to trade with you.", player->getName()));
tradePartner->tradeState = TRADE_ACKNOWLEDGE;
tradePartner->tradePartner = player;
} else { | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "pugicast.h"
#include "actions.h"
#include "bed.h"
#include "configmanager.h"
#include "creature.h"
#include "creatureevent.h"
#include "databasetasks.h"
#include "events.h"
#include "game.h"
#include "globalevent.h"
#include "iologindata.h"
#include "iomarket.h"
#include "items.h"
#include "monster.h"
#include "movement.h"
#include "scheduler.h"
#include "server.h"
#include "spells.h"
#include "talkaction.h"
#include "weapons.h"
#include "script.h"
#include <fmt/format.h>
extern ConfigManager g_config;
extern Actions* g_actions;
extern Chat* g_chat;
extern TalkActions* g_talkActions;
extern Spells* g_spells;
extern Vocations g_vocations;
extern GlobalEvents* g_globalEvents;
extern CreatureEvents* g_creatureEvents;
extern Events* g_events;
extern Monsters g_monsters;
extern MoveEvents* g_moveEvents;
extern Weapons* g_weapons;
extern Scripts* g_scripts;
Game::Game()
{
offlineTrainingWindow.defaultEnterButton = 1;
offlineTrainingWindow.defaultEscapeButton = 0;
offlineTrainingWindow.choices.emplace_back("Sword Fighting and Shielding", SKILL_SWORD);
offlineTrainingWindow.choices.emplace_back("Axe Fighting and Shielding", SKILL_AXE);
offlineTrainingWindow.choices.emplace_back("Club Fighting and Shielding", SKILL_CLUB);
offlineTrainingWindow.choices.emplace_back("Distance Fighting and Shielding", SKILL_DISTANCE);
offlineTrainingWindow.choices.emplace_back("Magic Level and Shielding", SKILL_MAGLEVEL);
offlineTrainingWindow.buttons.emplace_back("Okay", offlineTrainingWindow.defaultEnterButton);
offlineTrainingWindow.buttons.emplace_back("Cancel", offlineTrainingWindow.defaultEscapeButton);
offlineTrainingWindow.priority = true;
}
Game::~Game()
{
for (const auto& it : guilds) {
delete it.second;
}
}
void Game::start(ServiceManager* manager)
{
serviceManager = manager;
updateWorldTime();
if (g_config.getBoolean(ConfigManager::DEFAULT_WORLD_LIGHT)) {
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
}
g_scheduler.addEvent(createSchedulerTask(EVENT_CREATURE_THINK_INTERVAL, std::bind(&Game::checkCreatures, this, 0)));
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
}
GameState_t Game::getGameState() const
{
return gameState;
}
void Game::setWorldType(WorldType_t type)
{
worldType = type;
}
void Game::setGameState(GameState_t newState)
{
if (gameState == GAME_STATE_SHUTDOWN) {
return; //this cannot be stopped
}
if (gameState == newState) {
return;
}
gameState = newState;
switch (newState) {
case GAME_STATE_INIT: {
groups.load();
g_chat->load();
map.spawns.startup();
raids.loadFromXml();
raids.startup();
quests.loadFromXml();
mounts.loadFromXml();
loadMotdNum();
loadPlayersRecord();
loadAccountStorageValues();
g_globalEvents->startup();
break;
}
case GAME_STATE_SHUTDOWN: {
g_globalEvents->execute(GLOBALEVENT_SHUTDOWN);
//kick all players that are still online
auto it = players.begin();
while (it != players.end()) {
it->second->kickPlayer(true);
it = players.begin();
}
saveMotdNum();
saveGameState();
g_dispatcher.addTask(
createTask(std::bind(&Game::shutdown, this)));
g_scheduler.stop();
g_databaseTasks.stop();
g_dispatcher.stop();
break;
}
case GAME_STATE_CLOSED: {
/* kick all players without the CanAlwaysLogin flag */
auto it = players.begin();
while (it != players.end()) {
if (!it->second->hasFlag(PlayerFlag_CanAlwaysLogin)) {
it->second->kickPlayer(true);
it = players.begin();
} else {
++it;
}
}
saveGameState();
break;
}
default:
break;
}
}
void Game::saveGameState()
{
if (gameState == GAME_STATE_NORMAL) {
setGameState(GAME_STATE_MAINTAIN);
}
std::cout << "Saving server..." << std::endl;
if (!saveAccountStorageValues()) {
std::cout << "[Error - Game::saveGameState] Failed to save account-level storage values." << std::endl;
}
for (const auto& it : players) {
it.second->loginPosition = it.second->getPosition();
IOLoginData::savePlayer(it.second);
}
Map::save();
g_databaseTasks.flush();
if (gameState == GAME_STATE_MAINTAIN) {
setGameState(GAME_STATE_NORMAL);
}
}
bool Game::loadMainMap(const std::string& filename)
{
return map.loadMap("data/world/" + filename + ".otbm", true);
}
void Game::loadMap(const std::string& path)
{
map.loadMap(path, false);
}
Cylinder* Game::internalGetCylinder(Player* player, const Position& pos) const
{
if (pos.x != 0xFFFF) {
return map.getTile(pos);
}
//container
if (pos.y & 0x40) {
uint8_t from_cid = pos.y & 0x0F;
return player->getContainerByID(from_cid);
}
//inventory
return player;
}
Thing* Game::internalGetThing(Player* player, const Position& pos, int32_t index, uint32_t spriteId, stackPosType_t type) const
{
if (pos.x != 0xFFFF) {
Tile* tile = map.getTile(pos);
if (!tile) {
return nullptr;
}
Thing* thing;
switch (type) {
case STACKPOS_LOOK: {
return tile->getTopVisibleThing(player);
}
case STACKPOS_MOVE: {
Item* item = tile->getTopDownItem();
if (item && item->isMoveable()) {
thing = item;
} else {
thing = tile->getTopVisibleCreature(player);
}
break;
}
case STACKPOS_USEITEM: {
thing = tile->getUseItem(index);
break;
}
case STACKPOS_TOPDOWN_ITEM: {
thing = tile->getTopDownItem();
break;
}
case STACKPOS_USETARGET: {
thing = tile->getTopVisibleCreature(player);
if (!thing) {
thing = tile->getUseItem(index);
}
break;
}
default: {
thing = nullptr;
break;
}
}
if (player && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
//do extra checks here if the thing is accessible
if (thing && thing->getItem()) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
thing = nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
thing = nullptr;
}
}
}
}
return thing;
}
//container
if (pos.y & 0x40) {
uint8_t fromCid = pos.y & 0x0F;
Container* parentContainer = player->getContainerByID(fromCid);
if (!parentContainer) {
return nullptr;
}
if (parentContainer->getID() == ITEM_BROWSEFIELD) {
Tile* tile = parentContainer->getTile();
if (tile && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
return nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
return nullptr;
}
}
}
}
uint8_t slot = pos.z;
return parentContainer->getItemByIndex(player->getContainerIndex(fromCid) + slot);
} else if (pos.y == 0 && pos.z == 0) {
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return nullptr;
}
int32_t subType;
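// fluid containers report a client-side fluid index; reverseFluidMap converts it back to the server-side subtype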
if (it.isFluidContainer() && index < static_cast<int32_t>(sizeof(reverseFluidMap) / sizeof(uint8_t))) {
subType = reverseFluidMap[index];
} else {
subType = -1;
}
return findItemOfType(player, it.id, true, subType);
}
//inventory
slots_t slot = static_cast<slots_t>(pos.y);
if (slot == CONST_SLOT_STORE_INBOX) {
return player->getStoreInbox();
}
return player->getInventoryItem(slot);
}
void Game::internalGetPosition(Item* item, Position& pos, uint8_t& stackpos)
{
pos.x = 0;
pos.y = 0;
pos.z = 0;
stackpos = 0;
Cylinder* topParent = item->getTopParent();
if (topParent) {
if (Player* player = dynamic_cast<Player*>(topParent)) {
pos.x = 0xFFFF;
Container* container = dynamic_cast<Container*>(item->getParent());
if (container) {
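// container positions use x == 0xFFFF with bit 0x40 set in y; the low bits of y carry the container id (e.g. container id 3 -> y = 0x43)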
pos.y = static_cast<uint16_t>(0x40) | static_cast<uint16_t>(player->getContainerID(container));
pos.z = container->getThingIndex(item);
stackpos = pos.z;
} else {
pos.y = player->getThingIndex(item);
stackpos = pos.y;
}
} else if (Tile* tile = topParent->getTile()) {
pos = tile->getPosition();
stackpos = tile->getThingIndex(item);
}
}
}
Creature* Game::getCreatureByID(uint32_t id)
{
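// creature IDs are handed out in per-type ranges, so the numeric range alone identifies players, NPCs and monsters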
if (id <= Player::playerIDLimit) {
return getPlayerByID(id);
} else if (id <= Npc::npcAutoID) {
return getNpcByID(id);
} else if (id <= Monster::monsterAutoID) {
return getMonsterByID(id);
}
return nullptr;
}
Monster* Game::getMonsterByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = monsters.find(id);
if (it == monsters.end()) {
return nullptr;
}
return it->second;
}
Npc* Game::getNpcByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = npcs.find(id);
if (it == npcs.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = players.find(id);
if (it == players.end()) {
return nullptr;
}
return it->second;
}
Creature* Game::getCreatureByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const std::string& lowerCaseName = asLowerCaseString(s);
{
auto it = mappedPlayerNames.find(lowerCaseName);
if (it != mappedPlayerNames.end()) {
return it->second;
}
}
auto equalCreatureName = [&](const std::pair<uint32_t, Creature*>& it) {
auto name = it.second->getName();
return lowerCaseName.size() == name.size() && std::equal(lowerCaseName.begin(), lowerCaseName.end(), name.begin(), [](char a, char b) {
return a == std::tolower(b);
});
};
{
auto it = std::find_if(npcs.begin(), npcs.end(), equalCreatureName);
if (it != npcs.end()) {
return it->second;
}
}
{
auto it = std::find_if(monsters.begin(), monsters.end(), equalCreatureName);
if (it != monsters.end()) {
return it->second;
}
}
return nullptr;
}
Npc* Game::getNpcByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const char* npcName = s.c_str();
for (const auto& it : npcs) {
if (strcasecmp(npcName, it.second->getName().c_str()) == 0) {
return it.second;
}
}
return nullptr;
}
Player* Game::getPlayerByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
auto it = mappedPlayerNames.find(asLowerCaseString(s));
if (it == mappedPlayerNames.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByGUID(const uint32_t& guid)
{
if (guid == 0) {
return nullptr;
}
auto it = mappedPlayerGuids.find(guid);
if (it == mappedPlayerGuids.end()) {
return nullptr;
}
return it->second;
}
ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player)
{
size_t strlen = s.length();
if (strlen == 0 || strlen > PLAYER_NAME_LENGTH) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
if (s.back() == '~') {
const std::string& query = asLowerCaseString(s.substr(0, strlen - 1));
std::string result;
ReturnValue ret = wildcardTree.findOne(query, result);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
player = getPlayerByName(result);
} else {
player = getPlayerByName(s);
}
if (!player) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
return RETURNVALUE_NOERROR;
}
Player* Game::getPlayerByAccount(uint32_t acc)
{
for (const auto& it : players) {
if (it.second->getAccount() == acc) {
return it.second;
}
}
return nullptr;
}
bool Game::internalPlaceCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (creature->getParent()) {
return false;
}
if (!map.placeCreature(pos, creature, extendedPos, forced)) {
return false;
}
creature->incrementReferenceCounter();
creature->setID();
creature->addList();
return true;
}
bool Game::placeCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (!internalPlaceCreature(creature, pos, extendedPos, forced)) {
return false;
}
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true);
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureAppear(creature, creature->getPosition(), true);
}
}
for (Creature* spectator : spectators) {
spectator->onCreatureAppear(creature, true);
}
creature->getParent()->postAddNotification(creature, nullptr, 0);
addCreatureCheck(creature);
creature->onPlacedCreature();
return true;
}
bool Game::removeCreature(Creature* creature, bool isLogout/* = true*/)
{
if (creature->isRemoved()) {
return false;
}
Tile* tile = creature->getTile();
std::vector<int32_t> oldStackPosVector;
SpectatorVec spectators;
map.getSpectators(spectators, tile->getPosition(), true);
for (Creature* spectator : spectators) {
if (Player* player = spectator->getPlayer()) {
oldStackPosVector.push_back(player->canSeeCreature(creature) ? tile->getClientIndexOfCreature(player, creature) : -1);
}
}
tile->removeCreature(creature);
const Position& tilePosition = tile->getPosition();
//send to client
size_t i = 0;
for (Creature* spectator : spectators) {
if (Player* player = spectator->getPlayer()) {
player->sendRemoveTileCreature(creature, tilePosition, oldStackPosVector[i++]);
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onRemoveCreature(creature, isLogout);
}
creature->getParent()->postRemoveNotification(creature, nullptr, 0);
creature->removeList();
creature->setRemoved();
ReleaseCreature(creature);
removeCreatureCheck(creature);
for (Creature* summon : creature->summons) {
summon->setSkillLoss(false);
removeCreature(summon);
}
return true;
}
void Game::executeDeath(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && !creature->isRemoved()) {
creature->onDeath();
}
}
void Game::playerMoveThing(uint32_t playerId, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Creature* movingCreature = thing->getCreature()) {
Tile* tile = map.getTile(toPos);
if (!tile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Position::areInRange<1, 1, 0>(movingCreature->getPosition(), player->getPosition())) {
SchedulerTask* task = createSchedulerTask(MOVE_CREATURE_INTERVAL,
std::bind(&Game::playerMoveCreatureByID, this, player->getID(),
movingCreature->getID(), movingCreature->getPosition(), tile->getPosition()));
player->setNextActionTask(task);
} else {
playerMoveCreature(player, movingCreature, movingCreature->getPosition(), tile);
}
} else if (thing->getItem()) {
Cylinder* toCylinder = internalGetCylinder(player, toPos);
if (!toCylinder) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, thing->getItem(), toCylinder);
}
}
void Game::playerMoveCreatureByID(uint32_t playerId, uint32_t movingCreatureId, const Position& movingCreatureOrigPos, const Position& toPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* movingCreature = getCreatureByID(movingCreatureId);
if (!movingCreature) {
return;
}
Tile* toTile = map.getTile(toPos);
if (!toTile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveCreature(player, movingCreature, movingCreatureOrigPos, toTile);
}
void Game::playerMoveCreature(Player* player, Creature* movingCreature, const Position& movingCreatureOrigPos, Tile* toTile)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveCreatureByID,
this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextActionTask(task);
return;
}
if (movingCreature->isMovementBlocked()) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
player->setNextActionTask(nullptr);
if (!Position::areInRange<1, 1, 0>(movingCreatureOrigPos, player->getPosition())) {
//need to walk to the creature first before moving it
std::vector<Direction> listDir;
if (player->getPathTo(movingCreatureOrigPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_MOVE_CREATURE_INTERVAL, std::bind(&Game::playerMoveCreatureByID, this,
player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
if ((!movingCreature->isPushable() && !player->hasFlag(PlayerFlag_CanPushAllCreatures)) ||
(movingCreature->isInGhostMode() && !player->canSeeGhostMode(movingCreature))) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
//check throw distance
const Position& movingCreaturePos = movingCreature->getPosition();
const Position& toPos = toTile->getPosition();
if ((Position::getDistanceX(movingCreaturePos, toPos) > movingCreature->getThrowRange()) ||
(Position::getDistanceY(movingCreaturePos, toPos) > movingCreature->getThrowRange()) ||
(Position::getDistanceZ(movingCreaturePos, toPos) * 4 > movingCreature->getThrowRange())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (player != movingCreature) {
if (toTile->hasFlag(TILESTATE_BLOCKPATH)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
} else if ((movingCreature->getZone() == ZONE_PROTECTION && !toTile->hasFlag(TILESTATE_PROTECTIONZONE)) || (movingCreature->getZone() == ZONE_NOPVP && !toTile->hasFlag(TILESTATE_NOPVPZONE))) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
} else {
if (CreatureVector* tileCreatures = toTile->getCreatures()) {
for (Creature* tileCreature : *tileCreatures) {
if (!tileCreature->isInGhostMode()) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
Npc* movingNpc = movingCreature->getNpc();
if (movingNpc && !Spawns::isInZone(movingNpc->getMasterPos(), movingNpc->getMasterRadius(), toPos)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
if (!g_events->eventPlayerOnMoveCreature(player, movingCreature, movingCreaturePos, toPos)) {
return;
}
ReturnValue ret = internalMoveCreature(*movingCreature, *toTile);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
}
}
ReturnValue Game::internalMoveCreature(Creature* creature, Direction direction, uint32_t flags /*= 0*/)
{
creature->setLastPosition(creature->getPosition());
const Position& currentPos = creature->getPosition();
Position destPos = getNextPosition(direction, currentPos);
Player* player = creature->getPlayer();
bool diagonalMovement = (direction & DIRECTION_DIAGONAL_MASK) != 0;
if (player && !diagonalMovement) {
//try to go up
if (currentPos.z != 8 && creature->getTile()->hasHeight(3)) {
Tile* tmpTile = map.getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1);
if (!tmpTile || (!tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.getZ() - 1);
if (tmpTile && tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_IMMOVABLEBLOCKSOLID)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
if (!tmpTile->hasFlag(TILESTATE_FLOORCHANGE)) {
player->setDirection(direction);
destPos.z--;
}
}
}
}
//try to go down
if (currentPos.z != 7 && currentPos.z == destPos.z) {
Tile* tmpTile = map.getTile(destPos.x, destPos.y, destPos.z);
if (!tmpTile || (!tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.z + 1);
if (tmpTile && tmpTile->hasHeight(3) && !tmpTile->hasFlag(TILESTATE_IMMOVABLEBLOCKSOLID)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
player->setDirection(direction);
destPos.z++;
}
}
}
}
Tile* toTile = map.getTile(destPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
return internalMoveCreature(*creature, *toTile, flags);
}
ReturnValue Game::internalMoveCreature(Creature& creature, Tile& toTile, uint32_t flags /*= 0*/)
{
//check if we can move the creature to the destination
ReturnValue ret = toTile.queryAdd(0, creature, 1, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(creature, toTile);
if (creature.getParent() != &toTile) {
return RETURNVALUE_NOERROR;
}
int32_t index = 0;
Item* toItem = nullptr;
Tile* subCylinder = nullptr;
Tile* toCylinder = &toTile;
Tile* fromCylinder = nullptr;
uint32_t n = 0;
while ((subCylinder = toCylinder->queryDestination(index, creature, &toItem, flags)) != toCylinder) {
map.moveCreature(creature, *subCylinder);
if (creature.getParent() != subCylinder) {
//could happen if a script moves the creature
fromCylinder = nullptr;
break;
}
fromCylinder = toCylinder;
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++n >= MAP_MAX_LAYERS) {
break;
}
}
if (fromCylinder) {
const Position& fromPosition = fromCylinder->getPosition();
const Position& toPosition = toCylinder->getPosition();
if (fromPosition.z != toPosition.z && (fromPosition.x != toPosition.x || fromPosition.y != toPosition.y)) {
Direction dir = getDirectionTo(fromPosition, toPosition);
if ((dir & DIRECTION_DIAGONAL_MASK) == 0) {
internalCreatureTurn(&creature, dir);
}
}
}
return RETURNVALUE_NOERROR;
}
void Game::playerMoveItemByPlayerID(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, nullptr, nullptr);
}
void Game::playerMoveItem(Player* player, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count, Item* item, Cylinder* toCylinder)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
if (!item) {
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing || !thing->getItem()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
item = thing->getItem();
}
if (item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* fromCylinder = internalGetCylinder(player, fromPos);
if (!fromCylinder) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (!toCylinder) {
toCylinder = internalGetCylinder(player, toPos);
if (!toCylinder) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!item->isPushable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
const Position& playerPos = player->getPosition();
const Position& mapFromPos = fromCylinder->getTile()->getPosition();
if (playerPos.z != mapFromPos.z) {
player->sendCancelMessage(playerPos.z > mapFromPos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, mapFromPos)) {
//need to walk to the item first before using it
std::vector<Direction> listDir;
if (player->getPathTo(item->getPosition(), listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_MOVE_ITEM_INTERVAL, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
const Tile* toCylinderTile = toCylinder->getTile();
const Position& mapToPos = toCylinderTile->getPosition();
//hangable item specific code
if (item->isHangable() && toCylinderTile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
//destination supports hangable objects so need to move there first
bool vertical = toCylinderTile->hasProperty(CONST_PROP_ISVERTICAL);
if (vertical) {
if (playerPos.x + 1 == mapToPos.x) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
} else { // horizontal
if (playerPos.y + 1 == mapToPos.y) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!Position::areInRange<1, 1, 0>(playerPos, mapToPos)) {
Position walkPos = mapToPos;
if (vertical) {
walkPos.x++;
} else {
walkPos.y++;
}
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1>(mapFromPos, playerPos)
&& !Position::areInRange<1, 1, 0>(mapFromPos, walkPos)) {
//need to pickup the item first
Item* moveItem = nullptr;
ReturnValue ret = internalMoveItem(fromCylinder, player, INDEX_WHEREEVER, item, count, &moveItem, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the player's inventory
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::vector<Direction> listDir;
if (player->getPathTo(walkPos, listDir, 0, 0, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_MOVE_ITEM_INTERVAL, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), itemPos, spriteId, itemStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
}
if (!item->isPickupable() && playerPos.z != mapToPos.z) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
int32_t throwRange = item->getThrowRange();
if ((Position::getDistanceX(playerPos, mapToPos) > throwRange) ||
(Position::getDistanceY(playerPos, mapToPos) > throwRange)) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (!canThrowObjectTo(mapFromPos, mapToPos, true, false, throwRange, throwRange)) {
player->sendCancelMessage(RETURNVALUE_CANNOTTHROW);
return;
}
uint8_t toIndex = 0;
if (toPos.x == 0xFFFF) {
if (toPos.y & 0x40) {
toIndex = toPos.z;
} else {
toIndex = static_cast<uint8_t>(toPos.y);
}
}
ReturnValue ret = internalMoveItem(fromCylinder, toCylinder, toIndex, item, count, nullptr, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
}
}
ReturnValue Game::internalMoveItem(Cylinder* fromCylinder, Cylinder* toCylinder, int32_t index,
Item* item, uint32_t count, Item** _moveItem, uint32_t flags /*= 0*/, Creature* actor/* = nullptr*/, Item* tradeItem/* = nullptr*/, const Position* fromPos /*= nullptr*/, const Position* toPos/*= nullptr*/)
{
Player* actorPlayer = actor ? actor->getPlayer() : nullptr;
if (actorPlayer && fromPos && toPos) {
if (!g_events->eventPlayerOnMoveItem(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder)) {
return RETURNVALUE_NOTPOSSIBLE;
}
}
Tile* fromTile = fromCylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == fromCylinder) {
fromCylinder = fromTile;
}
}
Item* toItem = nullptr;
Cylinder* subCylinder;
int floorN = 0;
while ((subCylinder = toCylinder->queryDestination(index, *item, &toItem, flags)) != toCylinder) {
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++floorN >= MAP_MAX_LAYERS) {
break;
}
}
//destination is the same as the source?
if (item == toItem) {
return RETURNVALUE_NOERROR; //silently ignore move
}
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, count, flags, actor);
if (ret == RETURNVALUE_NEEDEXCHANGE) {
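// the destination slot holds an item that must be swapped out: move toItem back to the source cylinder first, then retry adding our item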
//check if we can add it to source cylinder
ret = fromCylinder->queryAdd(fromCylinder->getThingIndex(item), *toItem, toItem->getItemCount(), 0);
if (ret == RETURNVALUE_NOERROR) {
if (actorPlayer && fromPos && toPos && !g_events->eventPlayerOnMoveItem(actorPlayer, toItem, count, *toPos, *fromPos, toCylinder, fromCylinder)) {
return RETURNVALUE_NOTPOSSIBLE;
}
//check how much we can move
uint32_t maxExchangeQueryCount = 0;
ReturnValue retExchangeMaxCount = fromCylinder->queryMaxCount(INDEX_WHEREEVER, *toItem, toItem->getItemCount(), maxExchangeQueryCount, 0);
if (retExchangeMaxCount != RETURNVALUE_NOERROR && maxExchangeQueryCount == 0) {
return retExchangeMaxCount;
}
if (toCylinder->queryRemove(*toItem, toItem->getItemCount(), flags, actor) == RETURNVALUE_NOERROR) {
int32_t oldToItemIndex = toCylinder->getThingIndex(toItem);
toCylinder->removeThing(toItem, toItem->getItemCount());
fromCylinder->addThing(toItem);
if (oldToItemIndex != -1) {
toCylinder->postRemoveNotification(toItem, fromCylinder, oldToItemIndex);
}
int32_t newToItemIndex = fromCylinder->getThingIndex(toItem);
if (newToItemIndex != -1) {
fromCylinder->postAddNotification(toItem, toCylinder, newToItemIndex);
}
ret = toCylinder->queryAdd(index, *item, count, flags);
if (actorPlayer && fromPos && toPos) {
g_events->eventPlayerOnItemMoved(actorPlayer, toItem, count, *toPos, *fromPos, toCylinder, fromCylinder);
}
toItem = nullptr;
}
}
}
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
//check how much we can move
uint32_t maxQueryCount = 0;
ReturnValue retMaxCount = toCylinder->queryMaxCount(index, *item, count, maxQueryCount, flags);
if (retMaxCount != RETURNVALUE_NOERROR && maxQueryCount == 0) {
return retMaxCount;
}
uint32_t m;
if (item->isStackable()) {
m = std::min<uint32_t>(count, maxQueryCount);
} else {
m = maxQueryCount;
}
Item* moveItem = item;
//check if we can remove this item
ret = fromCylinder->queryRemove(*item, m, flags, actor);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (tradeItem) {
if (toCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
Cylinder* tmpCylinder = toCylinder->getParent();
while (tmpCylinder) {
if (tmpCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
tmpCylinder = tmpCylinder->getParent();
}
}
//remove the item
int32_t itemIndex = fromCylinder->getThingIndex(item);
Item* updateItem = nullptr;
fromCylinder->removeThing(item, m);
//update item(s)
if (item->isStackable()) {
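// stacks cap at 100: n is how much merges into an existing equal stack, the remainder (m - n) is cloned as a new item below (e.g. moving 80 onto a stack of 60 merges 40 and clones a new stack of 40)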
uint32_t n;
if (item->equals(toItem)) {
n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
updateItem = toItem;
} else {
n = 0;
}
int32_t newCount = m - n;
if (newCount > 0) {
moveItem = item->clone();
moveItem->setItemCount(newCount);
} else {
moveItem = nullptr;
}
if (item->isRemoved()) {
ReleaseItem(item);
}
}
//add item
if (moveItem /*m - n > 0*/) {
toCylinder->addThing(index, moveItem);
}
if (itemIndex != -1) {
fromCylinder->postRemoveNotification(item, toCylinder, itemIndex);
}
if (moveItem) {
int32_t moveItemIndex = toCylinder->getThingIndex(moveItem);
if (moveItemIndex != -1) {
toCylinder->postAddNotification(moveItem, fromCylinder, moveItemIndex);
}
}
if (updateItem) {
int32_t updateItemIndex = toCylinder->getThingIndex(updateItem);
if (updateItemIndex != -1) {
toCylinder->postAddNotification(updateItem, fromCylinder, updateItemIndex);
}
}
if (_moveItem) {
if (moveItem) {
*_moveItem = moveItem;
} else {
*_moveItem = item;
}
}
//we could not move all, inform the player
if (item->isStackable() && maxQueryCount < count) {
return retMaxCount;
}
if (moveItem && moveItem->getDuration() > 0) {
if (moveItem->getDecaying() != DECAYING_TRUE) {
moveItem->incrementReferenceCounter();
moveItem->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(moveItem);
}
}
if (actorPlayer && fromPos && toPos) {
g_events->eventPlayerOnItemMoved(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder);
}
return ret;
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index /*= INDEX_WHEREEVER*/,
uint32_t flags/* = 0*/, bool test/* = false*/)
{
uint32_t remainderCount = 0;
return internalAddItem(toCylinder, item, index, flags, test, remainderCount);
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index,
uint32_t flags, bool test, uint32_t& remainderCount)
{
if (!toCylinder || !item) {
return RETURNVALUE_NOTPOSSIBLE;
}
Cylinder* destCylinder = toCylinder;
Item* toItem = nullptr;
toCylinder = toCylinder->queryDestination(index, *item, &toItem, flags);
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, item->getItemCount(), flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
/*
Check if we can add the whole amount. We do this by checking against the original cylinder,
since queryDestination can return a cylinder that might only hold part of the full amount.
*/
uint32_t maxQueryCount = 0;
ret = destCylinder->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), maxQueryCount, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (test) {
return RETURNVALUE_NOERROR;
}
if (item->isStackable() && item->equals(toItem)) {
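// merge into the existing equal stack first (capped at 100); any overflow is re-added as a separate item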
uint32_t m = std::min<uint32_t>(item->getItemCount(), maxQueryCount);
uint32_t n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
int32_t count = m - n;
if (count > 0) {
if (item->getItemCount() != count) {
Item* remainderItem = item->clone();
remainderItem->setItemCount(count);
if (internalAddItem(destCylinder, remainderItem, INDEX_WHEREEVER, flags, false) != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
remainderCount = count;
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
} else {
//fully merged with toItem, item will be destroyed
item->onRemoved();
ReleaseItem(item);
int32_t itemIndex = toCylinder->getThingIndex(toItem);
if (itemIndex != -1) {
toCylinder->postAddNotification(toItem, nullptr, itemIndex);
}
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
if (item->getDuration() > 0) {
item->incrementReferenceCounter();
item->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(item);
}
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalRemoveItem(Item* item, int32_t count /*= -1*/, bool test /*= false*/, uint32_t flags /*= 0*/)
{
Cylinder* cylinder = item->getParent();
if (!cylinder) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
if (count == -1) {
count = item->getItemCount();
}
//check if we can remove this item
ReturnValue ret = cylinder->queryRemove(*item, count, flags | FLAG_IGNORENOTMOVEABLE);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (!item->canRemove()) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!test) {
int32_t index = cylinder->getThingIndex(item);
//remove the item
cylinder->removeThing(item, count);
if (item->isRemoved()) {
item->onRemoved();
if (item->canDecay()) {
decayItems->remove(item);
}
ReleaseItem(item);
}
cylinder->postRemoveNotification(item, nullptr, index);
}
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalPlayerAddItem(Player* player, Item* item, bool dropOnMap /*= true*/, slots_t slot /*= CONST_SLOT_WHEREEVER*/)
{
uint32_t remainderCount = 0;
ReturnValue ret = internalAddItem(player, item, static_cast<int32_t>(slot), 0, false, remainderCount);
if (remainderCount != 0) {
Item* remainderItem = Item::CreateItem(item->getID(), remainderCount);
ReturnValue remaindRet = internalAddItem(player->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
if (remaindRet != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
}
}
if (ret != RETURNVALUE_NOERROR && dropOnMap) {
ret = internalAddItem(player->getTile(), item, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
return ret;
}
Item* Game::findItemOfType(Cylinder* cylinder, uint16_t itemId,
bool depthSearch /*= true*/, int32_t subType /*= -1*/) const
{
if (!cylinder) {
return nullptr;
}
std::vector<Container*> containers;
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
if (depthSearch) {
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
Container* subContainer = item->getContainer();
if (subContainer) {
containers.push_back(subContainer);
}
}
}
return nullptr;
}
bool Game::removeMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
if (!cylinder) {
return false;
}
if (money == 0) {
return true;
}
std::vector<Container*> containers;
std::multimap<uint32_t, Item*> moneyMap;
uint64_t moneyCount = 0;
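	// collect every currency item (keyed by its worth) from the cylinder and all nested containers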
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
} else {
const uint32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
} else {
const uint32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
}
if (moneyCount < money) {
return false;
}
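	// spend stacks in ascending order of worth; if the last stack overpays, the change is returned via addMoney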
for (const auto& moneyEntry : moneyMap) {
Item* item = moneyEntry.second;
if (moneyEntry.first < money) {
internalRemoveItem(item);
money -= moneyEntry.first;
} else if (moneyEntry.first > money) {
const uint32_t worth = moneyEntry.first / item->getItemCount();
const uint32_t removeCount = std::ceil(money / static_cast<double>(worth));
addMoney(cylinder, (worth * removeCount) - money, flags);
internalRemoveItem(item, removeCount);
break;
} else {
internalRemoveItem(item);
break;
}
}
return true;
}
void Game::addMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
if (money == 0) {
return;
}
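	// pay out each currency denomination in stacks of at most 100 coins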
for (const auto& it : Item::items.currencyItems) {
const uint64_t worth = it.first;
uint32_t currencyCoins = money / worth;
if (currencyCoins <= 0) {
continue;
}
money -= currencyCoins * worth;
while (currencyCoins > 0) {
const uint16_t count = std::min<uint32_t>(100, currencyCoins);
			Item* remainderItem = Item::CreateItem(it.second, count);
			ReturnValue ret = internalAddItem(cylinder, remainderItem, INDEX_WHEREEVER, flags);
			if (ret != RETURNVALUE_NOERROR) {
				internalAddItem(cylinder->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
currencyCoins -= count;
}
}
}
Item* Game::transformItem(Item* item, uint16_t newId, int32_t newCount /*= -1*/)
{
if (item->getID() == newId && (newCount == -1 || (newCount == item->getSubType() && newCount != 0))) { //chargeless item placed on map = infinite
return item;
}
Cylinder* cylinder = item->getParent();
if (!cylinder) {
return nullptr;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
int32_t itemIndex = cylinder->getThingIndex(item);
if (itemIndex == -1) {
return item;
}
if (!item->canTransform()) {
return item;
}
const ItemType& newType = Item::items[newId];
if (newType.id == 0) {
return item;
}
const ItemType& curType = Item::items[item->getID()];
if (curType.alwaysOnTop != newType.alwaysOnTop) {
//This only occurs when you transform items on tiles from a downItem to a topItem (or vice versa)
//Remove the old, and add the new
cylinder->removeThing(item, item->getItemCount());
cylinder->postRemoveNotification(item, cylinder, itemIndex);
item->setID(newId);
if (newCount != -1) {
item->setSubType(newCount);
}
cylinder->addThing(item);
Cylinder* newParent = item->getParent();
if (!newParent) {
ReleaseItem(item);
return nullptr;
}
newParent->postAddNotification(item, cylinder, newParent->getThingIndex(item));
return item;
}
if (curType.type == newType.type) {
		//Both items have the same type, so we can safely change the id/subtype
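		// a new count of 0 means the stack or charges ran out: remove the item,
		// or replace it with its decayTo item when one is defined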
if (newCount == 0 && (item->isStackable() || item->hasAttribute(ITEM_ATTRIBUTE_CHARGES))) {
if (item->isStackable()) {
internalRemoveItem(item);
return nullptr;
} else {
int32_t newItemId = newId;
if (curType.id == newType.id) {
newItemId = item->getDecayTo();
}
if (newItemId < 0) {
internalRemoveItem(item);
return nullptr;
} else if (newItemId != newId) {
					//Replace the old item with the new one while maintaining the old position
Item* newItem = Item::CreateItem(newItemId, 1);
if (!newItem) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
return newItem;
}
return transformItem(item, newItemId);
}
} else {
cylinder->postRemoveNotification(item, cylinder, itemIndex);
uint16_t itemId = item->getID();
int32_t count = item->getSubType();
if (curType.id != newType.id) {
if (newType.group != curType.group) {
item->setDefaultSubtype();
}
itemId = newId;
}
if (newCount != -1 && newType.hasSubType()) {
count = newCount;
}
cylinder->updateThing(item, itemId, count);
cylinder->postAddNotification(item, cylinder, itemIndex);
return item;
}
}
//Replacing the old item with the new while maintaining the old position
Item* newItem;
if (newCount == -1) {
newItem = Item::CreateItem(newId);
} else {
newItem = Item::CreateItem(newId, newCount);
}
if (!newItem) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
if (newItem->getDuration() > 0) {
if (newItem->getDecaying() != DECAYING_TRUE) {
newItem->incrementReferenceCounter();
newItem->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(newItem);
}
}
return newItem;
}
ReturnValue Game::internalTeleport(Thing* thing, const Position& newPos, bool pushMove/* = true*/, uint32_t flags /*= 0*/)
{
if (newPos == thing->getPosition()) {
return RETURNVALUE_NOERROR;
} else if (thing->isRemoved()) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* toTile = map.getTile(newPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (Creature* creature = thing->getCreature()) {
ReturnValue ret = toTile->queryAdd(0, *creature, 1, FLAG_NOLIMIT);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(*creature, *toTile, !pushMove);
return RETURNVALUE_NOERROR;
} else if (Item* item = thing->getItem()) {
return internalMoveItem(item->getParent(), toTile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr, flags);
}
return RETURNVALUE_NOTPOSSIBLE;
}
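// Returns the first item with the given id inside the container; the iterator also descends into nested containers.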
Item* searchForItem(Container* container, uint16_t itemId)
{
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if ((*it)->getID() == itemId) {
return *it;
}
}
return nullptr;
}
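// Derives the equip slot from the item type's slotPosition flags; shields (and anything unmatched) fall back to the right hand.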
slots_t getSlotType(const ItemType& it)
{
slots_t slot = CONST_SLOT_RIGHT;
if (it.weaponType != WeaponType_t::WEAPON_SHIELD) {
int32_t slotPosition = it.slotPosition;
if (slotPosition & SLOTP_HEAD) {
slot = CONST_SLOT_HEAD;
} else if (slotPosition & SLOTP_NECKLACE) {
slot = CONST_SLOT_NECKLACE;
} else if (slotPosition & SLOTP_ARMOR) {
slot = CONST_SLOT_ARMOR;
} else if (slotPosition & SLOTP_LEGS) {
slot = CONST_SLOT_LEGS;
} else if (slotPosition & SLOTP_FEET) {
slot = CONST_SLOT_FEET;
} else if (slotPosition & SLOTP_RING) {
slot = CONST_SLOT_RING;
} else if (slotPosition & SLOTP_AMMO) {
slot = CONST_SLOT_AMMO;
} else if (slotPosition & SLOTP_TWO_HAND || slotPosition & SLOTP_LEFT) {
slot = CONST_SLOT_LEFT;
}
}
return slot;
}
//Implementation of player-invoked events
void Game::playerEquipItem(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Item* item = player->getInventoryItem(CONST_SLOT_BACKPACK);
if (!item) {
return;
}
Container* backpack = item->getContainer();
if (!backpack) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
slots_t slot = getSlotType(it);
Item* slotItem = player->getInventoryItem(slot);
Item* equipItem = searchForItem(backpack, it.id);
if (slotItem && slotItem->getID() == it.id && (!it.stackable || slotItem->getItemCount() == 100 || !equipItem)) {
internalMoveItem(slotItem->getParent(), player, CONST_SLOT_WHEREEVER, slotItem, slotItem->getItemCount(), nullptr);
} else if (equipItem) {
internalMoveItem(equipItem->getParent(), player, slot, equipItem, equipItem->getItemCount(), nullptr);
}
}
void Game::playerMove(uint32_t playerId, Direction direction)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->isMovementBlocked()) {
player->sendCancelWalk();
return;
}
player->resetIdleTime();
player->setNextWalkActionTask(nullptr);
player->startAutoWalk(direction);
}
bool Game::playerBroadcastMessage(Player* player, const std::string& text) const
{
if (!player->hasFlag(PlayerFlag_CanBroadcast)) {
return false;
}
std::cout << "> " << player->getName() << " broadcasted: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendPrivateMessage(player, TALKTYPE_BROADCAST, text);
}
return true;
}
void Game::playerCreatePrivateChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player || !player->isPremium()) {
return;
}
ChatChannel* channel = g_chat->createChannel(*player, CHANNEL_PRIVATE);
if (!channel || !channel->addUser(*player)) {
return;
}
player->sendCreatePrivateChannel(channel->getId(), channel->getName());
}
void Game::playerChannelInvite(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* invitePlayer = getPlayerByName(name);
if (!invitePlayer) {
return;
}
if (player == invitePlayer) {
return;
}
channel->invitePlayer(*player, *invitePlayer);
}
void Game::playerChannelExclude(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* excludePlayer = getPlayerByName(name);
if (!excludePlayer) {
return;
}
if (player == excludePlayer) {
return;
}
channel->excludePlayer(*player, *excludePlayer);
}
void Game::playerRequestChannels(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendChannelsDialog();
}
void Game::playerOpenChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
ChatChannel* channel = g_chat->addUserToChannel(*player, channelId);
if (!channel) {
return;
}
const InvitedMap* invitedUsers = channel->getInvitedUsers();
const UsersMap* users;
if (!channel->isPublicChannel()) {
users = &channel->getUsers();
} else {
users = nullptr;
}
player->sendChannel(channel->getId(), channel->getName(), users, invitedUsers);
}
void Game::playerCloseChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_chat->removeUserFromChannel(*player, channelId);
}
void Game::playerOpenPrivateChannel(uint32_t playerId, std::string& receiver)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!IOLoginData::formatPlayerName(receiver)) {
player->sendCancelMessage("A player with this name does not exist.");
return;
}
if (player->getName() == receiver) {
player->sendCancelMessage("You cannot set up a private message channel with yourself.");
return;
}
player->sendOpenPrivateChannel(receiver);
}
void Game::playerCloseNpcChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition());
for (Creature* spectator : spectators) {
if (Npc* npc = spectator->getNpc()) {
npc->onPlayerCloseChannel(player);
}
}
}
void Game::playerReceivePing(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->receivePing();
}
void Game::playerReceivePingBack(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendPingBack();
}
void Game::playerAutoWalk(uint32_t playerId, const std::vector<Direction>& listDir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkTask(nullptr);
player->startAutoWalk(listDir);
}
void Game::playerStopAutoWalk(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->stopWalk();
}
void Game::playerUseItemEx(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint16_t fromSpriteId,
const Position& toPos, uint8_t toStackPos, uint16_t toSpriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, fromSpriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != fromSpriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && toPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) &&
!Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
				//update the position since it's now in the player's inventory
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::vector<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_USE_ITEM_EX_INTERVAL, std::bind(&Game::playerUseItemEx, this,
playerId, itemPos, itemStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItemEx, this,
playerId, fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, toPos, toStackPos, item, isHotkey);
}
void Game::playerUseItem(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint8_t index, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
bool isHotkey = (pos.x == 0xFFFF && pos.y == 0 && pos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
ReturnValue ret = g_actions->canUse(player, pos);
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
std::vector<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_USE_ITEM_INTERVAL, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextWalkActionTask(task);
return;
}
ret = RETURNVALUE_THEREISNOWAY;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItem(player, pos, index, item, isHotkey);
}
void Game::playerUseWithCreature(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint32_t creatureId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!Position::areInRange<Map::maxClientViewportX - 1, Map::maxClientViewportY - 1, 0>(creature->getPosition(), player->getPosition())) {
return;
}
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
if (!g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
if (creature->getPlayer() || isHotkey) {
player->sendCancelMessage(RETURNVALUE_DIRECTPLAYERSHOOT);
return;
}
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position toPos = creature->getPosition();
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
				//update the position since it's now in the player's inventory
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::vector<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_USE_WITH_CREATURE_INTERVAL, std::bind(&Game::playerUseWithCreature, this,
playerId, itemPos, itemStackPos, creatureId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseWithCreature, this,
playerId, fromPos, fromStackPos, creatureId, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, creature->getPosition(), creature->getParent()->getThingIndex(creature), item, isHotkey, creature);
}
void Game::playerCloseContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeContainer(cid);
player->sendCloseContainer(cid);
}
void Game::playerMoveUpContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
Container* parentContainer = dynamic_cast<Container*>(container->getRealParent());
if (!parentContainer) {
Tile* tile = container->getTile();
if (!tile) {
return;
}
if (!g_events->eventPlayerOnBrowseField(player, tile->getPosition())) {
return;
}
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
parentContainer = new Container(tile);
parentContainer->incrementReferenceCounter();
browseFields[tile] = parentContainer;
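			// release the temporary browse-field container after 30 seconds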
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
parentContainer = it->second;
}
}
player->addContainer(cid, parentContainer);
player->sendContainer(cid, parentContainer, parentContainer->hasParent(), player->getContainerIndex(cid));
}
void Game::playerUpdateContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
player->sendContainer(cid, container, container->hasParent(), player->getContainerIndex(cid));
}
void Game::playerRotateItem(uint32_t playerId, const Position& pos, uint8_t stackPos, const uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!thing) {
return;
}
Item* item = thing->getItem();
if (!item || item->getClientID() != spriteId || !item->isRotatable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (pos.x != 0xFFFF && !Position::areInRange<1, 1, 0>(pos, player->getPosition())) {
std::vector<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_ROTATE_ITEM_INTERVAL, std::bind(&Game::playerRotateItem, this,
playerId, pos, stackPos, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
uint16_t newId = Item::items[item->getID()].rotateTo;
if (newId != 0) {
transformItem(item, newId);
}
}
void Game::playerWriteItem(uint32_t playerId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint16_t maxTextLength = 0;
uint32_t internalWindowTextId = 0;
Item* writeItem = player->getWriteItem(internalWindowTextId, maxTextLength);
if (text.length() > maxTextLength || windowTextId != internalWindowTextId) {
return;
}
if (!writeItem || writeItem->isRemoved()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* topParent = writeItem->getTopParent();
Player* owner = dynamic_cast<Player*>(topParent);
if (owner && owner != player) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (!Position::areInRange<1, 1, 0>(writeItem->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_TEXTEDIT)) {
if (!creatureEvent->executeTextEdit(player, writeItem, text)) {
player->setWriteItem(nullptr);
return;
}
}
if (!text.empty()) {
if (writeItem->getText() != text) {
writeItem->setText(text);
writeItem->setWriter(player->getName());
writeItem->setDate(time(nullptr));
}
} else {
writeItem->resetText();
writeItem->resetWriter();
writeItem->resetDate();
}
uint16_t newId = Item::items[writeItem->getID()].writeOnceItemId;
if (newId != 0) {
transformItem(writeItem, newId);
}
player->setWriteItem(nullptr);
}
void Game::playerBrowseField(uint32_t playerId, const Position& pos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Position& playerPos = player->getPosition();
if (playerPos.z != pos.z) {
player->sendCancelMessage(playerPos.z > pos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, pos)) {
std::vector<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_BROWSE_FIELD_INTERVAL, std::bind(
&Game::playerBrowseField, this, playerId, pos
));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Tile* tile = map.getTile(pos);
if (!tile) {
return;
}
if (!g_events->eventPlayerOnBrowseField(player, pos)) {
return;
}
Container* container;
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
container = new Container(tile);
container->incrementReferenceCounter();
browseFields[tile] = container;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
container = it->second;
}
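	// each tile position maps to one of nine reserved browse-field container ids (0x07..0x0F)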
uint8_t dummyContainerId = 0xF - ((pos.x % 3) * 3 + (pos.y % 3));
Container* openContainer = player->getContainerByID(dummyContainerId);
if (openContainer) {
player->onCloseContainer(openContainer);
player->closeContainer(dummyContainerId);
} else {
player->addContainer(dummyContainerId, container);
player->sendContainer(dummyContainerId, container, false, 0);
}
}
void Game::playerSeekInContainer(uint32_t playerId, uint8_t containerId, uint16_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(containerId);
if (!container || !container->hasPagination()) {
return;
}
if ((index % container->capacity()) != 0 || index >= container->size()) {
return;
}
player->setContainerIndex(containerId, index);
player->sendContainer(containerId, container, container->hasParent(), index);
}
void Game::playerUpdateHouseWindow(uint32_t playerId, uint8_t listId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint32_t internalWindowTextId;
uint32_t internalListId;
House* house = player->getEditHouse(internalWindowTextId, internalListId);
if (house && house->canEditAccessList(internalListId, player) && internalWindowTextId == windowTextId && listId == 0) {
house->setAccessList(internalListId, text);
}
player->setEditHouse(nullptr);
}
void Game::playerWrapItem(uint32_t playerId, const Position& position, uint8_t stackPos, const uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, position, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!thing) {
return;
}
Item* item = thing->getItem();
if (!item || item->getClientID() != spriteId || !item->hasAttribute(ITEM_ATTRIBUTE_WRAPID) || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (position.x != 0xFFFF && !Position::areInRange<1, 1, 0>(position, player->getPosition())) {
std::vector<Direction> listDir;
if (player->getPathTo(position, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_WRAP_ITEM_INTERVAL, std::bind(&Game::playerWrapItem, this,
playerId, position, stackPos, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
g_events->eventPlayerOnWrapItem(player, item);
}
void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint32_t tradePlayerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = getPlayerByID(tradePlayerId);
if (!tradePartner || tradePartner == player) {
player->sendCancelMessage("Select a player to trade with.");
return;
}
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition(), true, true)) {
player->sendCancelMessage(RETURNVALUE_CANNOTTHROW);
return;
}
Thing* tradeThing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!tradeThing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* tradeItem = tradeThing->getItem();
if (tradeItem->getClientID() != spriteId || !tradeItem->isPickupable() || tradeItem->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (g_config.getBoolean(ConfigManager::ONLY_INVITED_CAN_MOVE_HOUSE_ITEMS)) {
if (HouseTile* houseTile = dynamic_cast<HouseTile*>(tradeItem->getTile())) {
House* house = houseTile->getHouse();
if (house && !house->isInvited(player)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
if (playerPosition.z != tradeItemPosition.z) {
player->sendCancelMessage(playerPosition.z > tradeItemPosition.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(tradeItemPosition, playerPosition)) {
std::vector<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(RANGE_REQUEST_TRADE_INTERVAL, std::bind(&Game::playerRequestTrade, this,
playerId, pos, stackPos, tradePlayerId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Container* tradeItemContainer = tradeItem->getContainer();
if (tradeItemContainer) {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendCancelMessage("This item is already being traded.");
return;
}
if (tradeItemContainer->isHoldingItem(item)) {
player->sendCancelMessage("This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendCancelMessage("This item is already being traded.");
return;
}
}
} else {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendCancelMessage("This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendCancelMessage("This item is already being traded.");
return;
}
}
}
Container* tradeContainer = tradeItem->getContainer();
if (tradeContainer && tradeContainer->getItemHoldingCount() + 1 > 100) {
player->sendCancelMessage("You can only trade up to 100 objects at once.");
return;
}
if (!g_events->eventPlayerOnTradeRequest(player, tradePartner, tradeItem)) {
return;
}
internalStartTrade(player, tradePartner, tradeItem);
}
bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeItem)
{
if (player->tradeState != TRADE_NONE && !(player->tradeState == TRADE_ACKNOWLEDGE && player->tradePartner == tradePartner)) {
player->sendCancelMessage(RETURNVALUE_YOUAREALREADYTRADING);
return false;
} else if (tradePartner->tradeState != TRADE_NONE && tradePartner->tradePartner != player) {
player->sendCancelMessage(RETURNVALUE_THISPLAYERISALREADYTRADING);
return false;
}
player->tradePartner = tradePartner;
player->tradeItem = tradeItem;
player->tradeState = TRADE_INITIATED;
tradeItem->incrementReferenceCounter();
tradeItems[tradeItem] = player->getID();
player->sendTradeItemRequest(player->getName(), tradeItem, true);
if (tradePartner->tradeState == TRADE_NONE) {
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("{:s} wants to trade with you.", player->getName()));
tradePartner->tradeState = TRADE_ACKNOWLEDGE;
tradePartner->tradePartner = player;
} else {
Item* counterOfferItem = tradePartner->tradeItem;
player->sendTradeItemRequest(tradePartner->getName(), counterOfferItem, false);
tradePartner->sendTradeItemRequest(player->getName(), tradeItem, false);
}
return true;
}
void Game::playerAcceptTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!(player->getTradeState() == TRADE_ACKNOWLEDGE || player->getTradeState() == TRADE_INITIATED)) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
player->setTradeState(TRADE_ACCEPT);
if (tradePartner->getTradeState() == TRADE_ACCEPT) {
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition(), true, true)) {
internalCloseTrade(player, false);
player->sendCancelMessage(RETURNVALUE_CANNOTTHROW);
tradePartner->sendCancelMessage(RETURNVALUE_CANNOTTHROW);
return;
}
Item* playerTradeItem = player->tradeItem;
Item* partnerTradeItem = tradePartner->tradeItem;
if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, playerTradeItem, partnerTradeItem)) {
internalCloseTrade(player, false);
return;
}
player->setTradeState(TRADE_TRANSFER);
tradePartner->setTradeState(TRADE_TRANSFER);
auto it = tradeItems.find(playerTradeItem);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
it = tradeItems.find(partnerTradeItem);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
bool isSuccess = false;
ReturnValue tradePartnerRet = RETURNVALUE_NOERROR;
ReturnValue playerRet = RETURNVALUE_NOERROR;
		// if a player is trying to trade their own equipped backpack
if (tradePartner->getInventoryItem(CONST_SLOT_BACKPACK) == partnerTradeItem) {
tradePartnerRet = (tradePartner->getInventoryItem(getSlotType(Item::items[playerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR);
}
if (player->getInventoryItem(CONST_SLOT_BACKPACK) == playerTradeItem) {
playerRet = (player->getInventoryItem(getSlotType(Item::items[partnerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR);
}
// both players try to trade equipped backpacks
if (player->getInventoryItem(CONST_SLOT_BACKPACK) == playerTradeItem && tradePartner->getInventoryItem(CONST_SLOT_BACKPACK) == partnerTradeItem) {
playerRet = RETURNVALUE_NOTENOUGHROOM;
}
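		// dry-run both add and remove operations before performing the actual exchange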
if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) {
tradePartnerRet = internalAddItem(tradePartner, playerTradeItem, INDEX_WHEREEVER, 0, true);
playerRet = internalAddItem(player, partnerTradeItem, INDEX_WHEREEVER, 0, true);
if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) {
playerRet = internalRemoveItem(playerTradeItem, playerTradeItem->getItemCount(), true);
tradePartnerRet = internalRemoveItem(partnerTradeItem, partnerTradeItem->getItemCount(), true);
if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) {
tradePartnerRet = internalMoveItem(playerTradeItem->getParent(), tradePartner, INDEX_WHEREEVER, playerTradeItem, playerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK, nullptr, partnerTradeItem);
if (tradePartnerRet == RETURNVALUE_NOERROR) {
internalMoveItem(partnerTradeItem->getParent(), player, INDEX_WHEREEVER, partnerTradeItem, partnerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK);
playerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, tradePartner);
partnerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, player);
isSuccess = true;
}
}
}
}
if (!isSuccess) {
std::string errorDescription;
if (tradePartner->tradeItem) {
errorDescription = getTradeErrorDescription(tradePartnerRet, playerTradeItem);
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
}
if (player->tradeItem) {
errorDescription = getTradeErrorDescription(playerRet, partnerTradeItem);
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
}
}
g_events->eventPlayerOnTradeCompleted(player, tradePartner, playerTradeItem, partnerTradeItem, isSuccess);
player->setTradeState(TRADE_NONE);
player->tradeItem = nullptr;
player->tradePartner = nullptr;
player->sendTradeClose();
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradeItem = nullptr;
tradePartner->tradePartner = nullptr;
tradePartner->sendTradeClose();
}
}
std::string Game::getTradeErrorDescription(ReturnValue ret, Item* item)
{
if (item) {
if (ret == RETURNVALUE_NOTENOUGHCAPACITY) {
return fmt::format("You do not have enough capacity to carry {:s}.\n {:s}", item->isStackable() && item->getItemCount() > 1 ? "these objects" : "this object", item->getWeightDescription());
} else if (ret == RETURNVALUE_NOTENOUGHROOM || ret == RETURNVALUE_CONTAINERNOTENOUGHROOM) {
return fmt::format("You do not have enough room to carry {:s}.", item->isStackable() && item->getItemCount() > 1 ? "these objects" : "this object");
}
}
return "Trade could not be completed.";
}
void Game::playerLookInTrade(uint32_t playerId, bool lookAtCounterOffer, uint8_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
Item* tradeItem;
if (lookAtCounterOffer) {
tradeItem = tradePartner->getTradeItem();
} else {
tradeItem = player->getTradeItem();
}
if (!tradeItem) {
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
int32_t lookDistance = std::max<int32_t>(Position::getDistanceX(playerPosition, tradeItemPosition),
Position::getDistanceY(playerPosition, tradeItemPosition));
if (index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance);
return;
}
Container* tradeContainer = tradeItem->getContainer();
if (!tradeContainer) {
return;
}
std::vector<const Container*> containers {tradeContainer};
size_t i = 0;
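	// walk the trade container tree breadth-first; index selects the n-th item in that traversal order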
while (i < containers.size()) {
const Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
}
if (--index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, item, lookDistance);
return;
}
}
}
}
void Game::playerCloseTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
internalCloseTrade(player);
}
void Game::internalCloseTrade(Player* player, bool sendCancel/* = true*/)
{
Player* tradePartner = player->tradePartner;
if ((tradePartner && tradePartner->getTradeState() == TRADE_TRANSFER) || player->getTradeState() == TRADE_TRANSFER) {
return;
}
if (player->getTradeItem()) {
auto it = tradeItems.find(player->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
player->tradeItem = nullptr;
}
player->setTradeState(TRADE_NONE);
player->tradePartner = nullptr;
if (sendCancel) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
}
player->sendTradeClose();
if (tradePartner) {
if (tradePartner->getTradeItem()) {
auto it = tradeItems.find(tradePartner->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
tradePartner->tradeItem = nullptr;
}
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradePartner = nullptr;
if (sendCancel) {
tradePartner->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
}
tradePartner->sendTradeClose();
}
}
void Game::playerPurchaseItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount,
bool ignoreCap/* = false*/, bool inBackpacks/* = false*/)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
merchant->onPlayerTrade(player, onBuy, it.id, subType, amount, ignoreCap, inBackpacks);
}
void Game::playerSellItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreEquipped)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
merchant->onPlayerTrade(player, onSell, it.id, subType, amount, ignoreEquipped);
}
void Game::playerCloseShop(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeShopWindow();
}
void Game::playerLookInShop(uint32_t playerId, uint16_t spriteId, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
int32_t subType;
if (it.isFluidContainer() || it.isSplash()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
const std::string& description = Item::getDescription(it, 1, nullptr, subType);
g_events->eventPlayerOnLookInShop(player, &it, subType, description);
}
void Game::playerLookAt(uint32_t playerId, const Position& pos, uint8_t stackPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_LOOK);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position thingPos = thing->getPosition();
if (!player->canSee(thingPos)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position playerPos = player->getPosition();
int32_t lookDistance;
if (thing != player) {
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, thingPos), Position::getDistanceY(playerPos, thingPos));
if (playerPos.z != thingPos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLook(player, pos, thing, stackPos, lookDistance);
}
void Game::playerLookInBattleList(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!player->canSeeCreature(creature)) {
return;
}
const Position& creaturePos = creature->getPosition();
if (!player->canSee(creaturePos)) {
return;
}
int32_t lookDistance;
if (creature != player) {
const Position& playerPos = player->getPosition();
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, creaturePos), Position::getDistanceY(playerPos, creaturePos));
if (playerPos.z != creaturePos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLookInBattleList(player, creature, lookDistance);
}
void Game::playerCancelAttackAndFollow(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerSetAttackedCreature(playerId, 0);
playerFollowCreature(playerId, 0);
player->stopWalk();
}
void Game::playerSetAttackedCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->getAttackedCreature() && creatureId == 0) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
Creature* attackCreature = getCreatureByID(creatureId);
if (!attackCreature) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
ReturnValue ret = Combat::canTargetCreature(player, attackCreature);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
player->sendCancelTarget();
player->setAttackedCreature(nullptr);
return;
}
player->setAttackedCreature(attackCreature);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
}
void Game::playerFollowCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setAttackedCreature(nullptr);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
player->setFollowCreature(getCreatureByID(creatureId));
}
void Game::playerSetFightModes(uint32_t playerId, fightMode_t fightMode, bool chaseMode, bool secureMode)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setFightMode(fightMode);
player->setChaseMode(chaseMode);
player->setSecureMode(secureMode);
}
void Game::playerRequestAddVip(uint32_t playerId, const std::string& name)
{
if (name.length() > PLAYER_NAME_LENGTH) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* vipPlayer = getPlayerByName(name);
if (!vipPlayer) {
uint32_t guid;
bool specialVip;
std::string formattedName = name;
if (!IOLoginData::getGuidByNameEx(guid, specialVip, formattedName)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name does not exist.");
return;
}
if (specialVip && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
player->addVIP(guid, formattedName, VIPSTATUS_OFFLINE);
} else {
if (vipPlayer->hasFlag(PlayerFlag_SpecialVIP) && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
if (!vipPlayer->isInGhostMode() || player->canSeeGhostMode(vipPlayer)) {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_ONLINE);
} else {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_OFFLINE);
}
}
}
void Game::playerRequestRemoveVip(uint32_t playerId, uint32_t guid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->removeVIP(guid);
}
void Game::playerRequestEditVip(uint32_t playerId, uint32_t guid, const std::string& description, uint32_t icon, bool notify)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->editVIP(guid, description, icon, notify);
}
void Game::playerTurn(uint32_t playerId, Direction dir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!g_events->eventPlayerOnTurn(player, dir)) {
return;
}
player->resetIdleTime();
internalCreatureTurn(player, dir);
}
void Game::playerRequestOutfit(uint32_t playerId)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendOutfitWindow();
}
void Game::playerToggleMount(uint32_t playerId, bool mount)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->toggleMount(mount);
}
void Game::playerChangeOutfit(uint32_t playerId, Outfit_t outfit)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(player->getSex(), outfit.lookType);
if (!playerOutfit) {
outfit.lookMount = 0;
}
if (outfit.lookMount != 0) {
Mount* mount = mounts.getMountByClientID(outfit.lookMount);
if (!mount) {
return;
}
if (!player->hasMount(mount)) {
return;
}
int32_t speedChange = mount->speed;
if (player->isMounted()) {
Mount* prevMount = mounts.getMountByID(player->getCurrentMount());
if (prevMount) {
speedChange -= prevMount->speed;
}
}
changeSpeed(player, speedChange);
player->setCurrentMount(mount->id);
} else if (player->isMounted()) {
player->dismount();
}
if (player->canWear(outfit.lookType, outfit.lookAddons)) {
player->defaultOutfit = outfit;
if (player->hasCondition(CONDITION_OUTFIT)) {
return;
}
internalCreatureChangeOutfit(player, outfit);
}
}
void Game::playerShowQuestLog(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendQuestLog();
}
void Game::playerShowQuestLine(uint32_t playerId, uint16_t questId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Quest* quest = quests.getQuestByID(questId);
if (!quest) {
return;
}
player->sendQuestLine(quest);
}
void Game::playerSay(uint32_t playerId, uint16_t channelId, SpeakClasses type,
const std::string& receiver, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
if (playerSaySpell(player, type, text)) {
return;
}
if (type == TALKTYPE_PRIVATE_PN) {
playerSpeakToNpc(player, text);
return;
}
uint32_t muteTime = player->isMuted();
if (muteTime > 0) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You are still muted for {:d} seconds.", muteTime));
return;
}
if (!text.empty() && text.front() == '/' && player->isAccessPlayer()) {
return;
}
player->removeMessageBuffer();
switch (type) {
case TALKTYPE_SAY:
internalCreatureSay(player, TALKTYPE_SAY, text, false);
break;
case TALKTYPE_WHISPER:
playerWhisper(player, text);
break;
case TALKTYPE_YELL:
playerYell(player, text);
break;
case TALKTYPE_PRIVATE_TO:
case TALKTYPE_PRIVATE_RED_TO:
playerSpeakTo(player, type, receiver, text);
break;
case TALKTYPE_CHANNEL_O:
case TALKTYPE_CHANNEL_Y:
case TALKTYPE_CHANNEL_R1:
g_chat->talkToChannel(*player, type, text, channelId);
break;
case TALKTYPE_BROADCAST:
playerBroadcastMessage(player, text);
break;
default:
break;
}
}
bool Game::playerSaySpell(Player* player, SpeakClasses type, const std::string& text)
{
std::string words = text;
TalkActionResult_t result = g_talkActions->playerSaySpell(player, type, words);
if (result == TALKACTION_BREAK) {
return true;
}
result = g_spells->playerSaySpell(player, words);
if (result == TALKACTION_BREAK) {
if (!g_config.getBoolean(ConfigManager::EMOTE_SPELLS)) {
return internalCreatureSay(player, TALKTYPE_SPELL, words, false);
} else {
return internalCreatureSay(player, TALKTYPE_MONSTER_SAY, words, false);
}
} else if (result == TALKACTION_FAILED) {
return true;
}
return false;
}
void Game::playerWhisper(Player* player, const std::string& text)
{
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition(), false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
//send to client
for (Creature* spectator : spectators) {
if (Player* spectatorPlayer = spectator->getPlayer()) {
if (!Position::areInRange<1, 1>(player->getPosition(), spectatorPlayer->getPosition())) {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, "pspsps");
} else {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
bool Game::playerYell(Player* player, const std::string& text)
{
if (player->hasCondition(CONDITION_YELLTICKS)) {
player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
return false;
}
if (!player->isAccessPlayer() && !player->hasFlag(PlayerFlag_IgnoreYellCheck)) {
uint32_t minimumLevel = g_config.getNumber(ConfigManager::YELL_MINIMUM_LEVEL);
if (player->getLevel() < minimumLevel) {
if (g_config.getBoolean(ConfigManager::YELL_ALLOW_PREMIUM)) {
if (!player->isPremium()) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not yell unless you have reached level {:d} or have a premium account.", minimumLevel));
return false;
}
} else {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not yell unless you have reached level {:d}.", minimumLevel));
return false;
}
}
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_YELLTICKS, 30000, 0);
player->addCondition(condition);
}
internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false);
return true;
}
bool Game::playerSpeakTo(Player* player, SpeakClasses type, const std::string& receiver,
const std::string& text)
{
Player* toPlayer = getPlayerByName(receiver);
if (!toPlayer) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
return false;
}
if (type == TALKTYPE_PRIVATE_RED_TO && (player->hasFlag(PlayerFlag_CanTalkRedPrivate) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER)) {
type = TALKTYPE_PRIVATE_RED_FROM;
} else {
type = TALKTYPE_PRIVATE_FROM;
}
if (!player->isAccessPlayer() && !player->hasFlag(PlayerFlag_IgnoreSendPrivateCheck)) {
uint32_t minimumLevel = g_config.getNumber(ConfigManager::MINIMUM_LEVEL_TO_SEND_PRIVATE);
if (player->getLevel() < minimumLevel) {
if (g_config.getBoolean(ConfigManager::PREMIUM_TO_SEND_PRIVATE)) {
if (!player->isPremium()) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not send private messages unless you have reached level {:d} or have a premium account.", minimumLevel));
return false;
}
} else {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not send private messages unless you have reached level {:d}.", minimumLevel));
return false;
}
}
}
toPlayer->sendPrivateMessage(player, type, text);
toPlayer->onCreatureSay(player, type, text);
if (toPlayer->isInGhostMode() && !player->canSeeGhostMode(toPlayer)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
} else {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("Message sent to {:s}.", toPlayer->getName()));
}
return true;
}
void Game::playerSpeakToNpc(Player* player, const std::string& text)
{
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition());
for (Creature* spectator : spectators) {
if (spectator->getNpc()) {
spectator->onCreatureSay(player, TALKTYPE_PRIVATE_PN, text);
}
}
}
//--
bool Game::canThrowObjectTo(const Position& fromPos, const Position& toPos, bool checkLineOfSight /*= true*/, bool sameFloor /*= false*/,
int32_t rangex /*= Map::maxClientViewportX*/, int32_t rangey /*= Map::maxClientViewportY*/) const
{
return map.canThrowObjectTo(fromPos, toPos, checkLineOfSight, sameFloor, rangex, rangey);
}
bool Game::isSightClear(const Position& fromPos, const Position& toPos, bool sameFloor /*= false*/) const
{
return map.isSightClear(fromPos, toPos, sameFloor);
}
bool Game::internalCreatureTurn(Creature* creature, Direction dir)
{
if (creature->getDirection() == dir) {
return false;
}
creature->setDirection(dir);
//send to client
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureTurn(creature);
}
return true;
}
bool Game::internalCreatureSay(Creature* creature, SpeakClasses type, const std::string& text,
bool ghostMode, SpectatorVec* spectatorsPtr/* = nullptr*/, const Position* pos/* = nullptr*/)
{
if (text.empty()) {
return false;
}
if (!pos) {
pos = &creature->getPosition();
}
SpectatorVec spectators;
if (!spectatorsPtr || spectatorsPtr->empty()) {
		// Use the caller-supplied SpectatorVec if it is already populated;
		// otherwise gather spectators into a local vector (the compiler will
		// hopefully optimize away the unused temporary).
if (type != TALKTYPE_YELL && type != TALKTYPE_MONSTER_YELL) {
map.getSpectators(spectators, *pos, false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
} else {
map.getSpectators(spectators, *pos, true, false,
(Map::maxClientViewportX * 2) + 2, (Map::maxClientViewportX * 2) + 2,
(Map::maxClientViewportY * 2) + 2, (Map::maxClientViewportY * 2) + 2);
}
} else {
spectators = (*spectatorsPtr);
}
//send to client
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
if (!ghostMode || tmpPlayer->canSeeCreature(creature)) {
tmpPlayer->sendCreatureSay(creature, type, text, pos);
}
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onCreatureSay(creature, type, text);
if (creature != spectator) {
g_events->eventCreatureOnHear(spectator, creature, text, type);
}
}
return true;
}
void Game::checkCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onWalk();
cleanup();
}
}
void Game::updateCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->goToFollowCreature();
}
}
void Game::checkCreatureAttack(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onAttacking(0);
}
}
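// Queues the creature for periodic think/attack/condition checks; the
// reference counter keeps it alive while it sits in a check bucket.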
void Game::addCreatureCheck(Creature* creature)
{
creature->creatureCheck = true;
if (creature->inCheckCreaturesVector) {
// already in a vector
return;
}
creature->inCheckCreaturesVector = true;
checkCreatureLists[uniform_random(0, EVENT_CREATURECOUNT - 1)].push_back(creature);
creature->incrementReferenceCounter();
}
void Game::removeCreatureCheck(Creature* creature)
{
if (creature->inCheckCreaturesVector) {
creature->creatureCheck = false;
}
}
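// Creatures are spread over EVENT_CREATURECOUNT buckets; each tick processes
// one bucket and re-schedules itself for the next, so every creature is
// checked once per full rotation.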
void Game::checkCreatures(size_t index)
{
g_scheduler.addEvent(createSchedulerTask(EVENT_CHECK_CREATURE_INTERVAL, std::bind(&Game::checkCreatures, this, (index + 1) % EVENT_CREATURECOUNT)));
auto& checkCreatureList = checkCreatureLists[index];
auto it = checkCreatureList.begin(), end = checkCreatureList.end();
while (it != end) {
Creature* creature = *it;
if (creature->creatureCheck) {
if (creature->getHealth() > 0) {
creature->onThink(EVENT_CREATURE_THINK_INTERVAL);
creature->onAttacking(EVENT_CREATURE_THINK_INTERVAL);
creature->executeConditions(EVENT_CREATURE_THINK_INTERVAL);
}
++it;
} else {
creature->inCheckCreaturesVector = false;
it = checkCreatureList.erase(it);
ReleaseCreature(creature);
}
}
cleanup();
}
void Game::changeSpeed(Creature* creature, int32_t varSpeedDelta)
{
int32_t varSpeed = creature->getSpeed() - creature->getBaseSpeed();
varSpeed += varSpeedDelta;
creature->setSpeed(varSpeed);
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), false, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendChangeSpeed(creature, creature->getStepSpeed());
}
}
void Game::internalCreatureChangeOutfit(Creature* creature, const Outfit_t& outfit)
{
if (!g_events->eventCreatureOnChangeOutfit(creature, outfit)) {
return;
}
creature->setCurrentOutfit(outfit);
if (creature->isInvisible()) {
return;
}
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureChangeOutfit(creature, outfit);
}
}
void Game::internalCreatureChangeVisible(Creature* creature, bool visible)
{
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureChangeVisible(creature, visible);
}
}
void Game::changeLight(const Creature* creature)
{
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureLight(creature);
}
}
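// Applies defense, armor and immunity checks to both damage components and
// shows the matching block effect; positive (healing) values are never blocked.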
bool Game::combatBlockHit(CombatDamage& damage, Creature* attacker, Creature* target, bool checkDefense, bool checkArmor, bool field, bool ignoreResistances /*= false */)
{
if (damage.primary.type == COMBAT_NONE && damage.secondary.type == COMBAT_NONE) {
return true;
}
if (target->getPlayer() && target->isInGhostMode()) {
return true;
}
if (damage.primary.value > 0) {
return false;
}
static const auto sendBlockEffect = [this](BlockType_t blockType, CombatType_t combatType, const Position& targetPos) {
if (blockType == BLOCK_DEFENSE) {
addMagicEffect(targetPos, CONST_ME_POFF);
} else if (blockType == BLOCK_ARMOR) {
addMagicEffect(targetPos, CONST_ME_BLOCKHIT);
} else if (blockType == BLOCK_IMMUNITY) {
uint8_t hitEffect = 0;
switch (combatType) {
case COMBAT_UNDEFINEDDAMAGE: {
return;
}
case COMBAT_ENERGYDAMAGE:
case COMBAT_FIREDAMAGE:
case COMBAT_PHYSICALDAMAGE:
case COMBAT_ICEDAMAGE:
case COMBAT_DEATHDAMAGE: {
hitEffect = CONST_ME_BLOCKHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
hitEffect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_HOLYDAMAGE: {
hitEffect = CONST_ME_HOLYDAMAGE;
break;
}
default: {
hitEffect = CONST_ME_POFF;
break;
}
}
addMagicEffect(targetPos, hitEffect);
}
};
BlockType_t primaryBlockType, secondaryBlockType;
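// blockHit() expects positive damage values while CombatDamage stores damage
// as negative, so flip the sign around each call.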
if (damage.primary.type != COMBAT_NONE) {
damage.primary.value = -damage.primary.value;
primaryBlockType = target->blockHit(attacker, damage.primary.type, damage.primary.value, checkDefense, checkArmor, field, ignoreResistances);
damage.primary.value = -damage.primary.value;
sendBlockEffect(primaryBlockType, damage.primary.type, target->getPosition());
} else {
primaryBlockType = BLOCK_NONE;
}
if (damage.secondary.type != COMBAT_NONE) {
damage.secondary.value = -damage.secondary.value;
secondaryBlockType = target->blockHit(attacker, damage.secondary.type, damage.secondary.value, false, false, field, ignoreResistances);
damage.secondary.value = -damage.secondary.value;
sendBlockEffect(secondaryBlockType, damage.secondary.type, target->getPosition());
} else {
secondaryBlockType = BLOCK_NONE;
}
damage.blockType = primaryBlockType;
return (primaryBlockType != BLOCK_NONE) && (secondaryBlockType != BLOCK_NONE);
}
void Game::combatGetTypeInfo(CombatType_t combatType, Creature* target, TextColor_t& color, uint8_t& effect)
{
switch (combatType) {
case COMBAT_PHYSICALDAMAGE: {
Item* splash = nullptr;
switch (target->getRace()) {
case RACE_VENOM:
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_HITBYPOISON;
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_SLIME);
break;
case RACE_BLOOD:
color = TEXTCOLOR_RED;
effect = CONST_ME_DRAWBLOOD;
if (const Tile* tile = target->getTile()) {
if (!tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_BLOOD);
}
}
break;
case RACE_UNDEAD:
color = TEXTCOLOR_LIGHTGREY;
effect = CONST_ME_HITAREA;
break;
case RACE_FIRE:
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_DRAWBLOOD;
break;
case RACE_ENERGY:
color = TEXTCOLOR_ELECTRICPURPLE;
effect = CONST_ME_ENERGYHIT;
break;
default:
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
if (splash) {
internalAddItem(target->getTile(), splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
startDecay(splash);
}
break;
}
case COMBAT_ENERGYDAMAGE: {
color = TEXTCOLOR_ELECTRICPURPLE;
effect = CONST_ME_ENERGYHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_DROWNDAMAGE: {
color = TEXTCOLOR_LIGHTBLUE;
effect = CONST_ME_LOSEENERGY;
break;
}
case COMBAT_FIREDAMAGE: {
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_HITBYFIRE;
break;
}
case COMBAT_ICEDAMAGE: {
color = TEXTCOLOR_SKYBLUE;
effect = CONST_ME_ICEATTACK;
break;
}
case COMBAT_HOLYDAMAGE: {
color = TEXTCOLOR_YELLOW;
effect = CONST_ME_HOLYDAMAGE;
break;
}
case COMBAT_DEATHDAMAGE: {
color = TEXTCOLOR_DARKRED;
effect = CONST_ME_SMALLCLOUDS;
break;
}
case COMBAT_LIFEDRAIN: {
color = TEXTCOLOR_RED;
effect = CONST_ME_MAGIC_RED;
break;
}
default: {
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
}
}
bool Game::combatChangeHealth(Creature* attacker, Creature* target, CombatDamage& damage)
{
const Position& targetPos = target->getPosition();
if (damage.primary.value > 0) {
if (target->getHealth() <= 0) {
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
int32_t realHealthChange = target->getHealth();
target->gainHealth(attacker, damage.primary.value);
realHealthChange = target->getHealth() - realHealthChange;
if (realHealthChange > 0 && !target->isInGhostMode()) {
auto damageString = fmt::format("{:d} hitpoint{:s}", realHealthChange, realHealthChange != 1 ? "s" : "");
std::string spectatorMessage;
TextMessage message;
message.position = targetPos;
message.primary.value = realHealthChange;
message.primary.color = TEXTCOLOR_PASTELRED;
SpectatorVec spectators;
map.getSpectators(spectators, targetPos, false, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_HEALED;
message.text = fmt::format("You heal {:s} for {:s}.", target->getNameDescription(), damageString);
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_HEALED;
if (!attacker) {
message.text = fmt::format("You were healed for {:s}.", damageString);
} else if (targetPlayer == attackerPlayer) {
message.text = fmt::format("You healed yourself for {:s}.", damageString);
} else {
message.text = fmt::format("You were healed by {:s} for {:s}.", attacker->getNameDescription(), damageString);
}
} else {
message.type = MESSAGE_HEALED_OTHERS;
if (spectatorMessage.empty()) {
if (!attacker) {
spectatorMessage = fmt::format("{:s} was healed for {:s}.", target->getNameDescription(), damageString);
} else if (attacker == target) {
spectatorMessage = fmt::format("{:s} healed {:s}self for {:s}.", attacker->getNameDescription(), targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "him") : "it", damageString);
} else {
spectatorMessage = fmt::format("{:s} healed {:s} for {:s}.", attacker->getNameDescription(), target->getNameDescription(), damageString);
}
spectatorMessage[0] = std::toupper(spectatorMessage[0]);
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
} else {
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return true;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
damage.primary.value = std::abs(damage.primary.value);
damage.secondary.value = std::abs(damage.secondary.value);
int32_t healthChange = damage.primary.value + damage.secondary.value;
if (healthChange == 0) {
return true;
}
TextMessage message;
message.position = targetPos;
SpectatorVec spectators;
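// A mana shield absorbs the hit into mana first; anything the mana pool
// cannot cover spills over into health below.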
if (targetPlayer && target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) {
int32_t manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
if (manaDamage != 0) {
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
healthChange = damage.primary.value + damage.secondary.value;
if (healthChange == 0) {
return true;
}
manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
}
}
targetPlayer->drainMana(attacker, manaDamage);
map.getSpectators(spectators, targetPos, true, true);
addMagicEffect(spectators, targetPos, CONST_ME_LOSEENERGY);
std::string spectatorMessage;
message.primary.value = manaDamage;
message.primary.color = TEXTCOLOR_BLUE;
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_DAMAGE_DEALT;
message.text = fmt::format("{:s} loses {:d} mana due to your attack.", target->getNameDescription(), manaDamage);
message.text[0] = std::toupper(message.text[0]);
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_DAMAGE_RECEIVED;
if (!attacker) {
message.text = fmt::format("You lose {:d} mana.", manaDamage);
} else if (targetPlayer == attackerPlayer) {
message.text = fmt::format("You lose {:d} mana due to your own attack.", manaDamage);
} else {
message.text = fmt::format("You lose {:d} mana due to an attack by {:s}.", manaDamage, attacker->getNameDescription());
}
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
if (spectatorMessage.empty()) {
if (!attacker) {
spectatorMessage = fmt::format("{:s} loses {:d} mana.", target->getNameDescription(), manaDamage);
} else if (attacker == target) {
spectatorMessage = fmt::format("{:s} loses {:d} mana due to {:s} own attack.", target->getNameDescription(), manaDamage, targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his");
} else {
spectatorMessage = fmt::format("{:s} loses {:d} mana due to an attack by {:s}.", target->getNameDescription(), manaDamage, attacker->getNameDescription());
}
spectatorMessage[0] = std::toupper(spectatorMessage[0]);
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
damage.primary.value -= manaDamage;
if (damage.primary.value < 0) {
damage.secondary.value = std::max<int32_t>(0, damage.secondary.value + damage.primary.value);
damage.primary.value = 0;
}
}
}
int32_t realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
int32_t targetHealth = target->getHealth();
if (damage.primary.value >= targetHealth) {
damage.primary.value = targetHealth;
damage.secondary.value = 0;
} else if (damage.secondary.value) {
damage.secondary.value = std::min<int32_t>(damage.secondary.value, targetHealth - damage.primary.value);
}
realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
}
if (spectators.empty()) {
map.getSpectators(spectators, targetPos, true, true);
}
message.primary.value = damage.primary.value;
message.secondary.value = damage.secondary.value;
uint8_t hitEffect;
if (message.primary.value) {
combatGetTypeInfo(damage.primary.type, target, message.primary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(spectators, targetPos, hitEffect);
}
}
if (message.secondary.value) {
combatGetTypeInfo(damage.secondary.type, target, message.secondary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(spectators, targetPos, hitEffect);
}
}
if (message.primary.color != TEXTCOLOR_NONE || message.secondary.color != TEXTCOLOR_NONE) {
auto damageString = fmt::format("{:d} hitpoint{:s}", realDamage, realDamage != 1 ? "s" : "");
std::string spectatorMessage;
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_DAMAGE_DEALT;
message.text = fmt::format("{:s} loses {:s} due to your attack.", target->getNameDescription(), damageString);
message.text[0] = std::toupper(message.text[0]);
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_DAMAGE_RECEIVED;
if (!attacker) {
message.text = fmt::format("You lose {:s}.", damageString);
} else if (targetPlayer == attackerPlayer) {
message.text = fmt::format("You lose {:s} due to your own attack.", damageString);
} else {
message.text = fmt::format("You lose {:s} due to an attack by {:s}.", damageString, attacker->getNameDescription());
}
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
if (spectatorMessage.empty()) {
if (!attacker) {
spectatorMessage = fmt::format("{:s} loses {:s}.", target->getNameDescription(), damageString);
} else if (attacker == target) {
spectatorMessage = fmt::format("{:s} loses {:s} due to {:s} own attack.", target->getNameDescription(), damageString, targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his") : "its");
} else {
spectatorMessage = fmt::format("{:s} loses {:s} due to an attack by {:s}.", target->getNameDescription(), damageString, attacker->getNameDescription());
}
spectatorMessage[0] = std::toupper(spectatorMessage[0]);
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
if (realDamage >= targetHealth) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) {
if (!creatureEvent->executeOnPrepareDeath(target, attacker)) {
return false;
}
}
}
target->drainHealth(attacker, realDamage);
addCreatureHealth(spectators, target);
}
return true;
}
bool Game::combatChangeMana(Creature* attacker, Creature* target, CombatDamage& damage)
{
Player* targetPlayer = target->getPlayer();
if (!targetPlayer) {
return true;
}
int32_t manaChange = damage.primary.value + damage.secondary.value;
if (manaChange > 0) {
if (attacker) {
const Player* attackerPlayer = attacker->getPlayer();
if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(target) == SKULL_NONE) {
return false;
}
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeMana(attacker, target, damage);
}
}
int32_t realManaChange = targetPlayer->getMana();
targetPlayer->changeMana(manaChange);
realManaChange = targetPlayer->getMana() - realManaChange;
if (realManaChange > 0 && !targetPlayer->isInGhostMode()) {
TextMessage message(MESSAGE_HEALED, fmt::format("You gained {:d} mana.", realManaChange));
message.position = target->getPosition();
message.primary.value = realManaChange;
message.primary.color = TEXTCOLOR_MAYABLUE;
targetPlayer->sendTextMessage(message);
}
} else {
const Position& targetPos = target->getPosition();
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
int32_t manaLoss = std::min<int32_t>(targetPlayer->getMana(), -manaChange);
BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, manaLoss);
if (blockType != BLOCK_NONE) {
addMagicEffect(targetPos, CONST_ME_POFF);
return false;
}
if (manaLoss <= 0) {
return true;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeMana(attacker, target, damage);
}
}
targetPlayer->drainMana(attacker, manaLoss);
std::string spectatorMessage;
TextMessage message;
message.position = targetPos;
message.primary.value = manaLoss;
message.primary.color = TEXTCOLOR_BLUE;
SpectatorVec spectators;
map.getSpectators(spectators, targetPos, false, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_DAMAGE_DEALT;
message.text = fmt::format("{:s} loses {:d} mana due to your attack.", target->getNameDescription(), manaLoss);
message.text[0] = std::toupper(message.text[0]);
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_DAMAGE_RECEIVED;
if (!attacker) {
message.text = fmt::format("You lose {:d} mana.", manaLoss);
} else if (targetPlayer == attackerPlayer) {
message.text = fmt::format("You lose {:d} mana due to your own attack.", manaLoss);
} else {
message.text = fmt::format("You lose {:d} mana due to an attack by {:s}.", manaLoss, attacker->getNameDescription());
}
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
if (spectatorMessage.empty()) {
if (!attacker) {
spectatorMessage = fmt::format("{:s} loses {:d} mana.", target->getNameDescription(), manaLoss);
} else if (attacker == target) {
spectatorMessage = fmt::format("{:s} loses {:d} mana due to {:s} own attack.", target->getNameDescription(), manaLoss, targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his");
} else {
spectatorMessage = fmt::format("{:s} loses {:d} mana due to an attack by {:s}.", target->getNameDescription(), manaLoss, attacker->getNameDescription());
}
spectatorMessage[0] = std::toupper(spectatorMessage[0]);
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
return true;
}
void Game::addCreatureHealth(const Creature* target)
{
SpectatorVec spectators;
map.getSpectators(spectators, target->getPosition(), true, true);
addCreatureHealth(spectators, target);
}
void Game::addCreatureHealth(const SpectatorVec& spectators, const Creature* target)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureHealth(target);
}
}
}
void Game::addMagicEffect(const Position& pos, uint8_t effect)
{
SpectatorVec spectators;
map.getSpectators(spectators, pos, true, true);
addMagicEffect(spectators, pos, effect);
}
void Game::addMagicEffect(const SpectatorVec& spectators, const Position& pos, uint8_t effect)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendMagicEffect(pos, effect);
}
}
}
void Game::addDistanceEffect(const Position& fromPos, const Position& toPos, uint8_t effect)
{
SpectatorVec spectators, toPosSpectators;
map.getSpectators(spectators, fromPos, true, true);
map.getSpectators(toPosSpectators, toPos, true, true);
spectators.addSpectators(toPosSpectators);
addDistanceEffect(spectators, fromPos, toPos, effect);
}
void Game::addDistanceEffect(const SpectatorVec& spectators, const Position& fromPos, const Position& toPos, uint8_t effect)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendDistanceShoot(fromPos, toPos, effect);
}
}
}
void Game::setAccountStorageValue(const uint32_t accountId, const uint32_t key, const int32_t value)
{
if (value == -1) {
accountStorageMap[accountId].erase(key);
return;
}
accountStorageMap[accountId][key] = value;
}
int32_t Game::getAccountStorageValue(const uint32_t accountId, const uint32_t key) const
{
const auto& accountMapIt = accountStorageMap.find(accountId);
if (accountMapIt != accountStorageMap.end()) {
const auto& storageMapIt = accountMapIt->second.find(key);
if (storageMapIt != accountMapIt->second.end()) {
return storageMapIt->second;
}
}
return -1;
}
void Game::loadAccountStorageValues()
{
Database& db = Database::getInstance();
DBResult_ptr result;
if ((result = db.storeQuery("SELECT `account_id`, `key`, `value` FROM `account_storage`"))) {
do {
g_game.setAccountStorageValue(result->getNumber<uint32_t>("account_id"), result->getNumber<uint32_t>("key"), result->getNumber<int32_t>("value"));
} while (result->next());
}
}
bool Game::saveAccountStorageValues() const
{
DBTransaction transaction;
Database& db = Database::getInstance();
if (!transaction.begin()) {
return false;
}
if (!db.executeQuery("DELETE FROM `account_storage`")) {
return false;
}
for (const auto& accountIt : g_game.accountStorageMap) {
if (accountIt.second.empty()) {
continue;
}
DBInsert accountStorageQuery("INSERT INTO `account_storage` (`account_id`, `key`, `value`) VALUES");
for (const auto& storageIt : accountIt.second) {
if (!accountStorageQuery.addRow(fmt::format("{:d}, {:d}, {:d}", accountIt.first, storageIt.first, storageIt.second))) {
return false;
}
}
if (!accountStorageQuery.execute()) {
return false;
}
}
return transaction.commit();
}
void Game::startDecay(Item* item)
{
if (!item || !item->canDecay()) {
return;
}
ItemDecayState_t decayState = item->getDecaying();
if (decayState == DECAYING_TRUE) {
return;
}
if (item->getDuration() > 0) {
item->incrementReferenceCounter();
item->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(item);
} else {
internalDecayItem(item);
}
}
void Game::internalDecayItem(Item* item)
{
const ItemType& it = Item::items[item->getID()];
if (it.decayTo != 0) {
Item* newItem = transformItem(item, item->getDecayTo());
startDecay(newItem);
} else {
ReturnValue ret = internalRemoveItem(item);
if (ret != RETURNVALUE_NOERROR) {
std::cout << "[Debug - Game::internalDecayItem] internalDecayItem failed, error code: " << static_cast<uint32_t>(ret) << ", item id: " << item->getID() << std::endl;
}
}
}
void Game::checkDecay()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
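// Buckets are processed round-robin, one per EVENT_DECAYINTERVAL tick.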
size_t bucket = (lastBucket + 1) % EVENT_DECAY_BUCKETS;
auto it = decayItems[bucket].begin(), end = decayItems[bucket].end();
while (it != end) {
Item* item = *it;
if (!item->canDecay()) {
item->setDecaying(DECAYING_FALSE);
ReleaseItem(item);
it = decayItems[bucket].erase(it);
continue;
}
int32_t duration = item->getDuration();
int32_t decreaseTime = std::min<int32_t>(EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS, duration);
duration -= decreaseTime;
item->decreaseDuration(decreaseTime);
if (duration <= 0) {
it = decayItems[bucket].erase(it);
internalDecayItem(item);
ReleaseItem(item);
} else if (duration < EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
it = decayItems[bucket].erase(it);
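// Re-bucket the item so it is revisited as close to its expiry as possible;
// if that lands back on the current bucket, decay it immediately.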
size_t newBucket = (bucket + ((duration + EVENT_DECAYINTERVAL / 2) / 1000)) % EVENT_DECAY_BUCKETS;
if (newBucket == bucket) {
internalDecayItem(item);
ReleaseItem(item);
} else {
decayItems[newBucket].push_back(item);
}
} else {
++it;
}
}
lastBucket = bucket;
cleanup();
}
void Game::checkLight()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
updateWorldLightLevel();
LightInfo lightInfo = getWorldLightInfo();
for (const auto& it : players) {
it.second->sendWorldLight(lightInfo);
}
}
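// Interpolates the light level between night and day during sunrise and
// sunset, and clamps it to LIGHT_NIGHT or LIGHT_DAY otherwise.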
void Game::updateWorldLightLevel()
{
if (getWorldTime() >= GAME_SUNRISE && getWorldTime() <= GAME_DAYTIME) {
lightLevel = ((GAME_DAYTIME - GAME_SUNRISE) - (GAME_DAYTIME - getWorldTime())) * float(LIGHT_CHANGE_SUNRISE) + LIGHT_NIGHT;
} else if (getWorldTime() >= GAME_SUNSET && getWorldTime() <= GAME_NIGHTTIME) {
lightLevel = LIGHT_DAY - ((getWorldTime() - GAME_SUNSET) * float(LIGHT_CHANGE_SUNSET));
} else if (getWorldTime() >= GAME_NIGHTTIME || getWorldTime() < GAME_SUNRISE) {
lightLevel = LIGHT_NIGHT;
} else {
lightLevel = LIGHT_DAY;
}
}
void Game::updateWorldTime()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_WORLDTIMEINTERVAL, std::bind(&Game::updateWorldTime, this)));
time_t osTime = time(nullptr);
tm* timeInfo = localtime(&osTime);
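// 3600 real seconds / 2.5 = 1440, so one real hour spans a full in-game day
// of 1440 in-game minutes.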
worldTime = (timeInfo->tm_sec + (timeInfo->tm_min * 60)) / 2.5f;
}
void Game::shutdown()
{
std::cout << "Shutting down..." << std::flush;
g_scheduler.shutdown();
g_databaseTasks.shutdown();
g_dispatcher.shutdown();
map.spawns.clear();
raids.clear();
cleanup();
if (serviceManager) {
serviceManager->stop();
}
ConnectionManager::getInstance().closeAll();
std::cout << " done!" << std::endl;
}
void Game::cleanup()
{
//free memory
for (auto creature : ToReleaseCreatures) {
creature->decrementReferenceCounter();
}
ToReleaseCreatures.clear();
for (auto item : ToReleaseItems) {
item->decrementReferenceCounter();
}
ToReleaseItems.clear();
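// Distribute items queued by startDecay() into decay buckets: long durations
// go into the current bucket, shorter ones into the bucket nearest expiry.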
for (Item* item : toDecayItems) {
const uint32_t dur = item->getDuration();
if (dur >= EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
decayItems[lastBucket].push_back(item);
} else {
decayItems[(lastBucket + 1 + dur / 1000) % EVENT_DECAY_BUCKETS].push_back(item);
}
}
toDecayItems.clear();
}
void Game::ReleaseCreature(Creature* creature)
{
ToReleaseCreatures.push_back(creature);
}
void Game::ReleaseItem(Item* item)
{
ToReleaseItems.push_back(item);
}
void Game::broadcastMessage(const std::string& text, MessageClasses type) const
{
std::cout << "> Broadcasted message: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendTextMessage(type, text);
}
}
void Game::updateCreatureWalkthrough(const Creature* creature)
{
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
tmpPlayer->sendCreatureWalkthrough(creature, tmpPlayer->canWalkthroughEx(creature));
}
}
void Game::updateCreatureSkull(const Creature* creature)
{
if (getWorldType() != WORLD_TYPE_PVP) {
return;
}
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureSkull(creature);
}
}
void Game::updatePlayerShield(Player* player)
{
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureShield(player);
}
}
void Game::loadMotdNum()
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_num'");
if (result) {
motdNum = result->getNumber<uint32_t>("value");
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_num', '0')");
}
result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_hash'");
if (result) {
motdHash = result->getString("value");
if (motdHash != transformToSHA1(g_config.getString(ConfigManager::MOTD))) {
++motdNum;
}
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_hash', '')");
}
}
void Game::saveMotdNum() const
{
Database& db = Database::getInstance();
db.executeQuery(fmt::format("UPDATE `server_config` SET `value` = '{:d}' WHERE `config` = 'motd_num'", motdNum));
db.executeQuery(fmt::format("UPDATE `server_config` SET `value` = '{:s}' WHERE `config` = 'motd_hash'", transformToSHA1(g_config.getString(ConfigManager::MOTD))));
}
void Game::checkPlayersRecord()
{
const size_t playersOnline = getPlayersOnline();
if (playersOnline > playersRecord) {
uint32_t previousRecord = playersRecord;
playersRecord = playersOnline;
for (auto& it : g_globalEvents->getEventMap(GLOBALEVENT_RECORD)) {
it.second.executeRecord(playersRecord, previousRecord);
}
updatePlayersRecord();
}
}
void Game::updatePlayersRecord() const
{
Database& db = Database::getInstance();
db.executeQuery(fmt::format("UPDATE `server_config` SET `value` = '{:d}' WHERE `config` = 'players_record'", playersRecord));
}
void Game::loadPlayersRecord()
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'players_record'");
if (result) {
playersRecord = result->getNumber<uint32_t>("value");
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('players_record', '0')");
}
}
void Game::playerInviteToParty(uint32_t playerId, uint32_t invitedId)
{
if (playerId == invitedId) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || invitedPlayer->isInviting(player)) {
return;
}
if (invitedPlayer->getParty()) {
player->sendTextMessage(MESSAGE_INFO_DESCR, fmt::format("{:s} is already in a party.", invitedPlayer->getName()));
return;
}
Party* party = player->getParty();
if (!party) {
party = new Party(player);
} else if (party->getLeader() != player) {
return;
}
party->invitePlayer(*invitedPlayer);
}
void Game::playerJoinParty(uint32_t playerId, uint32_t leaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* leader = getPlayerByID(leaderId);
if (!leader || !leader->isInviting(player)) {
return;
}
Party* party = leader->getParty();
if (!party || party->getLeader() != leader) {
return;
}
if (player->getParty()) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "You are already in a party.");
return;
}
party->joinParty(*player);
}
void Game::playerRevokePartyInvitation(uint32_t playerId, uint32_t invitedId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || !player->isInviting(invitedPlayer)) {
return;
}
party->revokeInvitation(*invitedPlayer);
}
void Game::playerPassPartyLeadership(uint32_t playerId, uint32_t newLeaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* newLeader = getPlayerByID(newLeaderId);
if (!newLeader || !player->isPartner(newLeader)) {
return;
}
party->passPartyLeadership(newLeader);
}
void Game::playerLeaveParty(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || player->hasCondition(CONDITION_INFIGHT)) {
return;
}
party->leaveParty(player);
}
void Game::playerEnableSharedPartyExperience(uint32_t playerId, bool sharedExpActive)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || (player->hasCondition(CONDITION_INFIGHT) && player->getZone() != ZONE_PROTECTION)) {
return;
}
party->setSharedExperience(player, sharedExpActive);
}
void Game::sendGuildMotd(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Guild* guild = player->getGuild();
if (guild) {
player->sendChannelMessage("Message of the Day", guild->getMotd(), TALKTYPE_CHANNEL_R1, CHANNEL_GUILD);
}
}
void Game::kickPlayer(uint32_t playerId, bool displayEffect)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->kickPlayer(displayEffect);
}
void Game::playerReportRuleViolation(uint32_t playerId, const std::string& targetName, uint8_t reportType, uint8_t reportReason, const std::string& comment, const std::string& translation)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_events->eventPlayerOnReportRuleViolation(player, targetName, reportType, reportReason, comment, translation);
}
void Game::playerReportBug(uint32_t playerId, const std::string& message, const Position& position, uint8_t category)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_events->eventPlayerOnReportBug(player, message, position, category);
}
void Game::playerDebugAssert(uint32_t playerId, const std::string& assertLine, const std::string& date, const std::string& description, const std::string& comment)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
// TODO: move debug assertions to database
FILE* file = fopen("client_assertions.txt", "a");
if (file) {
fprintf(file, "----- %s - %s (%s) -----\n", formatDate(time(nullptr)).c_str(), player->getName().c_str(), convertIPToString(player->getIP()).c_str());
fprintf(file, "%s\n%s\n%s\n%s\n", assertLine.c_str(), date.c_str(), description.c_str(), comment.c_str());
fclose(file);
}
}
void Game::playerLeaveMarket(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setInMarket(false);
}
void Game::playerBrowseMarket(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
if (it.wareId == 0) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
player->sendMarketDetail(it.id);
}
void Game::playerBrowseMarketOwnOffers(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getOwnOffers(MARKETACTION_BUY, player->getGUID());
const MarketOfferList& sellOffers = IOMarket::getOwnOffers(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnOffers(buyOffers, sellOffers);
}
void Game::playerBrowseMarketOwnHistory(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const HistoryMarketOfferList& buyOffers = IOMarket::getOwnHistory(MARKETACTION_BUY, player->getGUID());
const HistoryMarketOfferList& sellOffers = IOMarket::getOwnHistory(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnHistory(buyOffers, sellOffers);
}
void Game::playerCreateMarketOffer(uint32_t playerId, uint8_t type, uint16_t spriteId, uint16_t amount, uint32_t price, bool anonymous)
{
if (amount == 0 || amount > 64000) {
return;
}
if (price == 0 || price > 999999999) {
return;
}
if (type != MARKETACTION_BUY && type != MARKETACTION_SELL) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
if (g_config.getBoolean(ConfigManager::MARKET_PREMIUM) && !player->isPremium()) {
player->sendMarketLeave();
return;
}
const ItemType& itt = Item::items.getItemIdByClientId(spriteId);
if (itt.id == 0 || itt.wareId == 0) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(itt.wareId);
if (it.id == 0 || it.wareId == 0) {
return;
}
if (!it.stackable && amount > 2000) {
return;
}
const uint32_t maxOfferCount = g_config.getNumber(ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER);
if (maxOfferCount != 0 && IOMarket::getPlayerOfferCount(player->getGUID()) >= maxOfferCount) {
return;
}
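// The market fee is 1% of the total offer value, clamped to
// [MIN_MARKET_FEE, MAX_MARKET_FEE].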
uint64_t fee = (price / 100.) * amount;
if (fee < MIN_MARKET_FEE) {
fee = MIN_MARKET_FEE;
} else if (fee > MAX_MARKET_FEE) {
fee = MAX_MARKET_FEE;
}
if (type == MARKETACTION_SELL) {
if (fee > (player->getMoney() + player->bankBalance)) {
return;
}
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
const auto debitCash = std::min(player->getMoney(), fee);
const auto debitBank = fee - debitCash;
removeMoney(player, debitCash);
player->bankBalance -= debitBank;
} else {
uint64_t totalPrice = static_cast<uint64_t>(price) * amount;
totalPrice += fee;
if (totalPrice > (player->getMoney() + player->bankBalance)) {
return;
}
const auto debitCash = std::min(player->getMoney(), totalPrice);
const auto debitBank = totalPrice - debitCash;
removeMoney(player, debitCash);
player->bankBalance -= debitBank;
}
IOMarket::createOffer(player->getGUID(), static_cast<MarketAction_t>(type), it.id, amount, price, anonymous);
player->sendMarketEnter(player->getLastDepotId());
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
}
void Game::playerCancelMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0 || offer.playerId != player->getGUID()) {
return;
}
if (offer.type == MARKETACTION_BUY) {
player->bankBalance += static_cast<uint64_t>(offer.price) * offer.amount;
player->sendMarketEnter(player->getLastDepotId());
} else {
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = offer.amount;
while (tmpAmount > 0) {
int32_t stackCount = std::min<int32_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < offer.amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
}
IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_CANCELLED);
offer.amount = 0;
offer.timestamp += g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
player->sendMarketCancelOffer(offer);
player->sendMarketEnter(player->getLastDepotId());
}
void Game::playerAcceptMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter, uint16_t amount)
{
if (amount == 0 || amount > 64000) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0) {
return;
}
uint32_t offerAccountId = IOLoginData::getAccountIdByPlayerId(offer.playerId);
if (offerAccountId == player->getAccount()) {
return;
}
if (amount > offer.amount) {
return;
}
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
uint64_t totalPrice = static_cast<uint64_t>(offer.price) * amount;
if (offer.type == MARKETACTION_BUY) {
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
Player* buyerPlayer = getPlayerByGUID(offer.playerId);
if (!buyerPlayer) {
buyerPlayer = new Player(nullptr);
if (!IOLoginData::loadPlayerById(buyerPlayer, offer.playerId)) {
delete buyerPlayer;
return;
}
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance += totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
if (buyerPlayer->isOffline()) {
IOLoginData::savePlayer(buyerPlayer);
delete buyerPlayer;
} else {
buyerPlayer->onReceiveMail();
}
} else {
if (totalPrice > (player->getMoney() + player->bankBalance)) {
return;
}
const auto debitCash = std::min(player->getMoney(), totalPrice);
const auto debitBank = totalPrice - debitCash;
removeMoney(player, debitCash);
player->bankBalance -= debitBank;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
Player* sellerPlayer = getPlayerByGUID(offer.playerId);
if (sellerPlayer) {
sellerPlayer->bankBalance += totalPrice;
} else {
IOLoginData::increaseBankBalance(offer.playerId, totalPrice);
}
player->onReceiveMail();
}
const int32_t marketOfferDuration = g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
IOMarket::appendHistory(player->getGUID(), (offer.type == MARKETACTION_BUY ? MARKETACTION_SELL : MARKETACTION_BUY), offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTEDEX);
IOMarket::appendHistory(offer.playerId, offer.type, offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTED);
offer.amount -= amount;
if (offer.amount == 0) {
IOMarket::deleteOffer(offer.id);
} else {
IOMarket::acceptOffer(offer.id, amount);
}
player->sendMarketEnter(player->getLastDepotId());
offer.timestamp += marketOfferDuration;
player->sendMarketAcceptOffer(offer);
}
void Game::parsePlayerExtendedOpcode(uint32_t playerId, uint8_t opcode, const std::string& buffer)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
for (CreatureEvent* creatureEvent : player->getCreatureEvents(CREATURE_EVENT_EXTENDED_OPCODE)) {
creatureEvent->executeExtendedOpcode(player, opcode, buffer);
}
}
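// Breadth-first walk over the depot chest and inbox collecting items that
// match wareId; returns an empty list if sufficientCount cannot be reached.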
std::forward_list<Item*> Game::getMarketItemList(uint16_t wareId, uint16_t sufficientCount, DepotChest* depotChest, Inbox* inbox)
{
std::forward_list<Item*> itemList;
uint16_t count = 0;
std::list<Container*> containers { depotChest, inbox };
do {
Container* container = containers.front();
containers.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containers.push_back(c);
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (itemType.wareId != wareId) {
continue;
}
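// Container items are only collected if they still match the default
// capacity of their item type.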
if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) {
continue;
}
if (!item->hasMarketAttributes()) {
continue;
}
itemList.push_front(item);
count += Item::countByType(item, -1);
if (count >= sufficientCount) {
return itemList;
}
}
} while (!containers.empty());
return std::forward_list<Item*>();
}
void Game::forceAddCondition(uint32_t creatureId, Condition* condition)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
delete condition;
return;
}
creature->addCondition(condition, true);
}
void Game::forceRemoveCondition(uint32_t creatureId, ConditionType_t type)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
creature->removeCondition(type, true);
}
void Game::sendOfflineTrainingDialog(Player* player)
{
if (!player) {
return;
}
if (!player->hasModalWindowOpen(offlineTrainingWindow.id)) {
player->sendModalWindow(offlineTrainingWindow);
}
}
void Game::playerAnswerModalWindow(uint32_t playerId, uint32_t modalWindowId, uint8_t button, uint8_t choice)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->hasModalWindowOpen(modalWindowId)) {
return;
}
player->onModalWindowHandled(modalWindowId);
// offline training, hard-coded
if (modalWindowId == std::numeric_limits<uint32_t>::max()) {
if (button == offlineTrainingWindow.defaultEnterButton) {
if (choice == SKILL_SWORD || choice == SKILL_AXE || choice == SKILL_CLUB || choice == SKILL_DISTANCE || choice == SKILL_MAGLEVEL) {
BedItem* bedItem = player->getBedItem();
if (bedItem && bedItem->sleep(player)) {
player->setOfflineTrainingSkill(choice);
return;
}
}
} else {
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted.");
}
player->setBedItem(nullptr);
} else {
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_MODALWINDOW)) {
creatureEvent->executeModalWindow(player, modalWindowId, button, choice);
}
}
}
void Game::addPlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames[lowercase_name] = player;
mappedPlayerGuids[player->getGUID()] = player;
wildcardTree.insert(lowercase_name);
players[player->getID()] = player;
}
void Game::removePlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames.erase(lowercase_name);
mappedPlayerGuids.erase(player->getGUID());
wildcardTree.remove(lowercase_name);
players.erase(player->getID());
}
void Game::addNpc(Npc* npc)
{
npcs[npc->getID()] = npc;
}
void Game::removeNpc(Npc* npc)
{
npcs.erase(npc->getID());
}
void Game::addMonster(Monster* monster)
{
monsters[monster->getID()] = monster;
}
void Game::removeMonster(Monster* monster)
{
monsters.erase(monster->getID());
}
Guild* Game::getGuild(uint32_t id) const
{
auto it = guilds.find(id);
if (it == guilds.end()) {
return nullptr;
}
return it->second;
}
void Game::addGuild(Guild* guild)
{
guilds[guild->getId()] = guild;
}
void Game::removeGuild(uint32_t guildId)
{
guilds.erase(guildId);
}
void Game::decreaseBrowseFieldRef(const Position& pos)
{
Tile* tile = map.getTile(pos.x, pos.y, pos.z);
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it != browseFields.end()) {
it->second->decrementReferenceCounter();
}
}
void Game::internalRemoveItems(std::vector<Item*> itemList, uint32_t amount, bool stackable)
{
if (stackable) {
for (Item* item : itemList) {
if (item->getItemCount() > amount) {
internalRemoveItem(item, amount);
break;
} else {
amount -= item->getItemCount();
internalRemoveItem(item);
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
}
BedItem* Game::getBedBySleeper(uint32_t guid) const
{
auto it = bedSleepersMap.find(guid);
if (it == bedSleepersMap.end()) {
return nullptr;
}
return it->second;
}
void Game::setBedSleeper(BedItem* bed, uint32_t guid)
{
bedSleepersMap[guid] = bed;
}
void Game::removeBedSleeper(uint32_t guid)
{
auto it = bedSleepersMap.find(guid);
if (it != bedSleepersMap.end()) {
bedSleepersMap.erase(it);
}
}
Item* Game::getUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it == uniqueItems.end()) {
return nullptr;
}
return it->second;
}
bool Game::addUniqueItem(uint16_t uniqueId, Item* item)
{
auto result = uniqueItems.emplace(uniqueId, item);
if (!result.second) {
std::cout << "Duplicate unique id: " << uniqueId << std::endl;
}
return result.second;
}
void Game::removeUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it != uniqueItems.end()) {
uniqueItems.erase(it);
}
}
bool Game::reload(ReloadTypes_t reloadType)
{
switch (reloadType) {
case RELOAD_TYPE_ACTIONS: return g_actions->reload();
case RELOAD_TYPE_CHAT: return g_chat->load();
case RELOAD_TYPE_CONFIG: return g_config.reload();
case RELOAD_TYPE_CREATURESCRIPTS: {
g_creatureEvents->reload();
g_creatureEvents->removeInvalidEvents();
return true;
}
case RELOAD_TYPE_EVENTS: return g_events->load();
case RELOAD_TYPE_GLOBALEVENTS: return g_globalEvents->reload();
case RELOAD_TYPE_ITEMS: return Item::items.reload();
case RELOAD_TYPE_MONSTERS: return g_monsters.reload();
case RELOAD_TYPE_MOUNTS: return mounts.reload();
case RELOAD_TYPE_MOVEMENTS: return g_moveEvents->reload();
case RELOAD_TYPE_NPCS: {
Npcs::reload();
return true;
}
case RELOAD_TYPE_QUESTS: return quests.reload();
case RELOAD_TYPE_RAIDS: return raids.reload() && raids.startup();
case RELOAD_TYPE_SPELLS: {
if (!g_spells->reload()) {
std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl;
std::terminate();
} else if (!g_monsters.reload()) {
std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl;
std::terminate();
}
return true;
}
case RELOAD_TYPE_TALKACTIONS: return g_talkActions->reload();
case RELOAD_TYPE_WEAPONS: {
bool results = g_weapons->reload();
g_weapons->loadDefaults();
return results;
}
case RELOAD_TYPE_SCRIPTS: {
// the commented-out reloads below are TODO once revscriptsys progresses further
g_actions->clear(true);
g_creatureEvents->clear(true);
g_moveEvents->clear(true);
g_talkActions->clear(true);
g_globalEvents->clear(true);
g_weapons->clear(true);
g_weapons->loadDefaults();
g_spells->clear(true);
g_scripts->loadScripts("scripts", false, true);
g_creatureEvents->removeInvalidEvents();
/*
Npcs::reload();
raids.reload() && raids.startup();
Item::items.reload();
quests.reload();
mounts.reload();
g_config.reload();
g_events->load();
g_chat->load();
*/
return true;
}
default: {
if (!g_spells->reload()) {
std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl;
std::terminate();
} else if (!g_monsters.reload()) {
std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl;
std::terminate();
}
g_actions->reload();
g_config.reload();
g_creatureEvents->reload();
g_monsters.reload();
g_moveEvents->reload();
Npcs::reload();
raids.reload() && raids.startup();
g_talkActions->reload();
Item::items.reload();
g_weapons->reload();
g_weapons->clear(true);
g_weapons->loadDefaults();
quests.reload();
mounts.reload();
g_globalEvents->reload();
g_events->load();
g_chat->load();
g_actions->clear(true);
g_creatureEvents->clear(true);
g_moveEvents->clear(true);
g_talkActions->clear(true);
g_globalEvents->clear(true);
g_spells->clear(true);
g_scripts->loadScripts("scripts", false, true);
g_creatureEvents->removeInvalidEvents();
return true;
}
}
return true;
}
| 1 | 19,767 | I believe this is incorrect; if I'm not wrong, MESSAGE_TRADE should be used when buying/selling items from NPCs | otland-forgottenserver | cpp |
@@ -728,6 +728,8 @@ namespace pwiz.Skyline
return _listGraphPeakArea.FirstOrDefault(g => g.Type == type) ?? CreateGraphPeakArea(type);
else if (split[1] == typeof(MassErrorGraphController).Name)
return _listGraphMassError.FirstOrDefault(g => g.Type == type) ?? CreateGraphMassError(type);
+ else if (split[1] == typeof(DetectionsGraphController).Name)
+ return _listGraphDetections.FirstOrDefault(g => g.Type == type) ?? CreateGraphDetections(type);
else
return null;
} | 1 | /*
* Original author: Brendan MacLean <brendanx .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2009 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Windows.Forms;
using DigitalRune.Windows.Docking;
using pwiz.Common.Collections;
using pwiz.Common.DataBinding;
using pwiz.Common.SystemUtil;
using pwiz.Skyline.Alerts;
using pwiz.Skyline.Controls.Databinding;
using pwiz.Skyline.Controls.Graphs;
using pwiz.Skyline.Controls.SeqNode;
using pwiz.Skyline.EditUI;
using pwiz.Skyline.Model;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.DocSettings.Extensions;
using pwiz.Skyline.Model.Results;
using pwiz.Skyline.Model.Results.Scoring;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Controls;
using pwiz.Skyline.Controls.AuditLog;
using pwiz.Skyline.Controls.Graphs.Calibration;
using pwiz.Skyline.Controls.GroupComparison;
using pwiz.Skyline.Model.AuditLog;
using pwiz.Skyline.Model.ElementLocators.ExportAnnotations;
using pwiz.Skyline.Model.Prosit.Models;
using pwiz.Skyline.Model.RetentionTimes;
using pwiz.Skyline.SettingsUI;
using pwiz.Skyline.Util;
using ZedGraph;
using pwiz.Skyline.Util.Extensions;
using PeptideDocNode = pwiz.Skyline.Model.PeptideDocNode;
using Transition = pwiz.Skyline.Model.Transition;
namespace pwiz.Skyline
{
public partial class SkylineWindow :
GraphSpectrum.IStateProvider,
GraphChromatogram.IStateProvider,
GraphSummary.IStateProvider
{
private GraphSpectrum _graphSpectrum;
private GraphFullScan _graphFullScan;
private readonly GraphSpectrumSettings _graphSpectrumSettings;
private readonly List<GraphSummary> _listGraphRetentionTime = new List<GraphSummary>();
private readonly List<GraphSummary> _listGraphPeakArea = new List<GraphSummary>();
private readonly List<GraphSummary> _listGraphMassError = new List<GraphSummary>();
private readonly List<GraphSummary> _listGraphDetections = new List<GraphSummary>();
private DockableForm _resultsGridForm;
private DocumentGridForm _documentGridForm;
private CalibrationForm _calibrationForm;
private AuditLogForm _auditLogForm;
public static int MAX_GRAPH_CHROM = 100; // Never show more than this many chromatograms, lest we hit the Windows handle limit
private readonly List<GraphChromatogram> _listGraphChrom = new List<GraphChromatogram>(); // List order is MRU, with oldest in position 0
private bool _inGraphUpdate;
private ChromFileInfoId _alignToFile;
private bool _alignToPrediction;
public RTGraphController RTGraphController
{
get
{
var active = _listGraphRetentionTime.FirstOrDefault();
if (active == null)
return null;
return active.Controller as RTGraphController;
}
}
private GraphSummary ContextMenuGraphSummary { get; set; }
private void dockPanel_ActiveDocumentChanged(object sender, EventArgs e)
{
try
{
ActiveDocumentChanged();
}
catch (Exception x)
{
Program.ReportException(x);
}
}
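/// <summary>
/// Keeps the results combo box and the summary graphs' active-library state
/// in sync with the docked form that has just become active.
/// </summary>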
private void ActiveDocumentChanged()
{
if (DocumentUI == null)
return;
var settings = DocumentUI.Settings;
if (_closing || ComboResults == null || ComboResults.IsDisposed || _inGraphUpdate || !settings.HasResults ||
settings.MeasuredResults.Chromatograms.Count < 2)
return;
var activeForm = dockPanel.ActiveDocument;
bool activeLibrary = ReferenceEquals(_graphSpectrum, activeForm);
_listGraphPeakArea.ForEach(g => g.ActiveLibrary = activeLibrary);
_listGraphRetentionTime.ForEach(g => g.ActiveLibrary = activeLibrary);
foreach (var graphChrom in _listGraphChrom.ToArray()) // List may be updating concurrent with this access, so convert to array first
{
if (ReferenceEquals(graphChrom, activeForm))
ComboResults.SelectedItem = graphChrom.TabText;
}
}
private void graphsToolStripMenuItem_Click(object sender, EventArgs e)
{
ShowGraphSpectrum(Settings.Default.ShowSpectra = true);
}
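/// <summary>
/// Suspends layout of the <see cref="DockPanel"/> and its panes while a batch
/// of docking changes is made, showing a wait cursor until disposed.
/// </summary>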
private class DockPanelLayoutLock : IDisposable
{
private DockPanel _dockPanel;
private Control _coverControl;
private Cursor _cursorBegin;
private bool _locked;
private HashSet<Control> _suspendedControls;
public DockPanelLayoutLock(DockPanel dockPanel, bool startLocked = false)
{
_dockPanel = dockPanel;
if (startLocked)
EnsureLocked();
}
/// <summary>
/// Called to lock layout of the <see cref="DockPanel"/>. Locking
/// is deferred until it is determined to be necessary, to avoid the
/// relayout calculation when locking is unnecessary.
/// </summary>
public void EnsureLocked()
{
if (!_locked)
{
_locked = true;
_dockPanel.SuspendLayout(true);
_coverControl = new CoverControl(_dockPanel);
var cursorControl = _dockPanel.TopLevelControl ?? _dockPanel;
_cursorBegin = cursorControl.Cursor;
cursorControl.Cursor = Cursors.WaitCursor;
Assume.IsNull(_suspendedControls);
_suspendedControls = new HashSet<Control>();
foreach (var pane in _dockPanel.Panes)
{
EnsurePaneLocked(pane);
}
}
}
public void Dispose()
{
if (_locked && _dockPanel != null)
{
_dockPanel.ResumeLayout(true, true);
_coverControl.Dispose();
var cursorControl = _dockPanel.TopLevelControl ?? _dockPanel;
cursorControl.Cursor = _cursorBegin;
}
if (_suspendedControls != null)
{
foreach (var control in _suspendedControls)
{
control.ResumeLayout();
}
_suspendedControls = null;
}
_dockPanel = null; // Only once
}
/// <summary>
/// Ensures that "SuspendControl" has been called on the DockPane, as well
/// as its controls (specifically its DockPaneStrip which spends a long time
/// redrawing as each child is added).
/// </summary>
/// <param name="dockPane">Pane whose controls should have their layout suspended</param>
public void EnsurePaneLocked(DockPane dockPane)
{
if (SuspendControl(dockPane))
{
foreach (var control in dockPane.Controls.OfType<Control>())
{
SuspendControl(control);
}
}
}
/// <summary>
/// Ensures SuspendLayout has been called on the control
/// </summary>
/// <returns>false if the control has already been suspended</returns>
private bool SuspendControl(Control control)
{
if (control == null || _suspendedControls == null)
{
return false;
}
if (!_suspendedControls.Add(control))
{
return false;
}
control.SuspendLayout();
return true;
}
}
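/// <summary>
/// Brings all graph-related UI (menus, summary graphs and chromatogram panes)
/// up to date after a settings change, reloading the saved window layout when
/// the document id has changed.
/// </summary>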
private void UpdateGraphUI(SrmSettings settingsOld, bool docIdChanged)
{
SrmSettings settingsNew = DocumentUI.Settings;
if (ReferenceEquals(settingsNew, settingsOld))
{
// Just about any change could potentially change the list
// or retention times or peak areas.
if (settingsNew.HasResults)
{
// UpdateGraphPanes can handle null values in the list, but
// only call it when at least one of the graphs is present.
if (_listGraphMassError.Any() || _listGraphPeakArea.Any() || _listGraphRetentionTime.Any())
UpdateGraphPanes(new List<IUpdatable>(_listGraphMassError.Concat(_listGraphPeakArea.Concat(_listGraphRetentionTime))));
}
return;
}
var listUpdateGraphs = new List<IUpdatable>();
var filterNew = settingsNew.TransitionSettings.Filter;
var filterOld = settingsOld.TransitionSettings.Filter;
if (!ReferenceEquals(filterNew, filterOld))
{
// If ion types or charges changed, make sure the new
// ones are on and the old ones are off by default.
bool refresh = false;
if (!ArrayUtil.EqualsDeep(filterNew.PeptideIonTypes, filterOld.PeptideIonTypes) ||
!ArrayUtil.EqualsDeep(filterNew.SmallMoleculeIonTypes, filterOld.SmallMoleculeIonTypes))
{
// Only turn off old ion types, if new settings are not MS1-only full-scan
var fullScan = settingsNew.TransitionSettings.FullScan;
var enablePeptides = DocumentUI.DocumentType != SrmDocument.DOCUMENT_TYPE.small_molecules;
var enableSmallMolecules = DocumentUI.HasSmallMolecules;
if (!fullScan.IsEnabled || fullScan.IsEnabledMsMs)
{
CheckIonTypes(filterOld.PeptideIonTypes, false, enablePeptides);
CheckIonTypes(filterOld.SmallMoleculeIonTypes, false, enableSmallMolecules);
}
CheckIonTypes(filterNew.PeptideIonTypes, true, enablePeptides);
CheckIonTypes(filterNew.SmallMoleculeIonTypes, true, enableSmallMolecules);
refresh = true;
}
// Charge selection
if (!ArrayUtil.EqualsDeep(filterNew.PeptideProductCharges, filterOld.PeptideProductCharges) ||
!ArrayUtil.EqualsDeep(filterNew.SmallMoleculeFragmentAdducts, filterOld.SmallMoleculeFragmentAdducts))
{
// First clear any old charge enabling
CheckIonCharges(filterOld.PeptideProductCharges, false);
CheckIonCharges(filterOld.SmallMoleculeFragmentAdducts, false);
// Then enable based on settings and document contents
switch (DocumentUI.DocumentType)
{
case SrmDocument.DOCUMENT_TYPE.none:
case SrmDocument.DOCUMENT_TYPE.proteomic:
CheckIonCharges(filterNew.PeptideProductCharges, true);
break;
case SrmDocument.DOCUMENT_TYPE.small_molecules:
CheckIonCharges(filterNew.SmallMoleculeFragmentAdducts, true);
break;
case SrmDocument.DOCUMENT_TYPE.mixed:
CheckIonCharges(filterNew.PeptideProductCharges, true);
CheckIonCharges(filterNew.SmallMoleculeFragmentAdducts, true);
break;
}
refresh = true;
}
if (refresh && _graphSpectrum != null)
listUpdateGraphs.Add(_graphSpectrum);
}
using (var layoutLock = new DockPanelLayoutLock(dockPanel))
{
bool deserialized = false;
string layoutFile = GetViewFile(DocumentFilePath);
if (docIdChanged && File.Exists(layoutFile))
{
layoutLock.EnsureLocked();
try
{
using (var layoutReader = new StreamReader(layoutFile))
{
LoadLayout(layoutReader.BaseStream);
}
deserialized = true;
}
catch (Exception x)
{
var message = TextUtil.LineSeparate(string.Format(Resources.SkylineWindow_UpdateGraphUI_Failure_attempting_to_load_the_window_layout_file__0__, layoutFile),
Resources.SkylineWindow_UpdateGraphUI_Rename_or_delete_this_file_to_restore_the_default_layout,
Resources.SkylineWindow_UpdateGraphUI_Skyline_may_also_need_to_be_restarted);
throw new IOException(message, x);
}
}
EnableGraphSpectrum(layoutLock, settingsNew, deserialized);
var enable = settingsNew.HasResults;
bool enableSchedule = IsRetentionTimeGraphTypeEnabled(GraphTypeSummary.schedule);
bool enableRunToRun = IsRetentionTimeGraphTypeEnabled(GraphTypeSummary.run_to_run_regression);
if (replicateComparisonMenuItem.Enabled != enable ||
retentionTimesMenuItem.Enabled != enableSchedule ||
runToRunMenuItem.Enabled != enableRunToRun)
{
retentionTimesMenuItem.Enabled = enableSchedule;
replicateComparisonMenuItem.Enabled = enable;
timePeptideComparisonMenuItem.Enabled = enable;
regressionMenuItem.Enabled = enable;
scoreToRunMenuItem.Enabled = enable;
runToRunMenuItem.Enabled = runToRunToolStripMenuItem.Enabled = enableRunToRun;
schedulingMenuItem.Enabled = enableSchedule;
if (!deserialized)
{
layoutLock.EnsureLocked();
UpdateUIGraphRetentionTime(IsRetentionTimeGraphTypeEnabled);
}
}
if (resultsGridMenuItem.Enabled != enable)
{
resultsGridMenuItem.Enabled = enable;
if (!deserialized)
{
layoutLock.EnsureLocked();
ShowResultsGrid(enable && Settings.Default.ShowResultsGrid);
}
}
if (peakAreasMenuItem.Enabled != enable)
{
peakAreasMenuItem.Enabled = enable;
areaReplicateComparisonMenuItem.Enabled = enable;
areaPeptideComparisonMenuItem.Enabled = enable;
areaCVHistogramMenuItem.Enabled = enable;
areaCVHistogram2DMenuItem.Enabled = enable;
if (!deserialized)
{
layoutLock.EnsureLocked();
UpdateUIGraphPeakArea(enable);
}
}
if (massErrorsMenuItem.Enabled != enable)
{
massErrorsMenuItem.Enabled = enable;
massErrorReplicateComparisonMenuItem.Enabled = enable;
massErrorPeptideComparisonMenuItem.Enabled = enable;
if (!deserialized)
{
layoutLock.EnsureLocked();
UpdateUIGraphMassError(enable);
}
}
if (detectionsPlotsMenuItem.Enabled != enable)
{
detectionsPlotsMenuItem.Enabled = enable;
detectionsHistogramMenuItem.Enabled = enable;
detectionsReplicateComparisonMenuItem.Enabled = enable;
if (!deserialized)
{
layoutLock.EnsureLocked();
UpdateUIGraphDetection(enable);
}
}
if (_graphFullScan != null && _graphFullScan.Visible && !enable)
{
layoutLock.EnsureLocked();
DestroyGraphFullScan();
}
if (!ReferenceEquals(settingsNew.MeasuredResults, settingsOld.MeasuredResults))
{
// First hide all graph windows for results that no longer exist in the document
foreach (var graphChromatogram in _listGraphChrom.ToArray())
{
string name = graphChromatogram.NameSet;
// Look for matching chromatogram sets across the documents
ChromatogramSet chromSetOld;
ChromatogramSet chromSetNew;
int index;
if (settingsOld.HasResults &&
settingsOld.MeasuredResults.TryGetChromatogramSet(name, out chromSetOld, out index) &&
settingsNew.HasResults &&
settingsNew.MeasuredResults.TryGetChromatogramSet(chromSetOld.Id.GlobalIndex, out chromSetNew, out index))
{
// If matching chromatogram found, but name has changed, then
// update the graph pane
if (!Equals(chromSetNew.Name, chromSetOld.Name))
name = graphChromatogram.NameSet = chromSetNew.Name;
}
var results = settingsNew.MeasuredResults;
if (results == null || !results.Chromatograms.Contains(chrom => Equals(chrom.Name, name)))
{
layoutLock.EnsureLocked();
ShowGraphChrom(graphChromatogram.NameSet, false);
// If changed to a new document, destroy unused panes
if (docIdChanged)
{
var graphChrom = GetGraphChrom(name);
if (graphChrom != null)
{
RemoveGraphChromFromList(graphChrom);
}
}
}
}
// Next show any graph windows for results that were not previously part of
// the document.
if (settingsNew.MeasuredResults != null && !deserialized)
{
// Keep changes in graph panes from stealing the focus
var focusStart = User32.GetFocusedControl();
_inGraphUpdate = true;
try
{
string nameFirst = null;
string nameLast = SelectedGraphChromName;
foreach (var chromatogram in settingsNew.MeasuredResults.Chromatograms)
{
string name = chromatogram.Name;
var graphChrom = GetGraphChrom(name);
if (graphChrom == null)
{
if (_listGraphChrom.Count < MAX_GRAPH_CHROM) // Limit window count to conserve win32 handles
{
layoutLock.EnsureLocked();
graphChrom = CreateGraphChrom(name, nameLast, false);
layoutLock.EnsurePaneLocked(graphChrom.Pane);
nameFirst = nameFirst ?? name;
nameLast = name;
}
}
// If the pane is not showing a tab for this graph, then add one.
else if (graphChrom.Pane == null ||
!graphChrom.Pane.DisplayingContents.Contains(graphChrom))
{
layoutLock.EnsureLocked();
ShowGraphChrom(name, true);
nameFirst = nameFirst ?? name;
nameLast = name;
}
}
// Put the first set on top, since it will get populated with data first
if (nameFirst != null)
{
layoutLock.EnsureLocked();
ShowGraphChrom(nameFirst, true);
}
}
finally
{
_inGraphUpdate = false;
}
if (focusStart != null)
focusStart.Focus();
}
// Update displayed graphs, which are no longer valid
var listGraphUpdate = from graph in _listGraphChrom
where graph.Visible
where !graph.IsCurrent(settingsOld, settingsNew)
select graph;
listUpdateGraphs.AddRange(listGraphUpdate.ToArray());
// Make sure view menu is correctly enabled
bool enabled = settingsNew.HasResults;
chromatogramsMenuItem.Enabled = enabled;
transitionsMenuItem.Enabled = enabled;
transformChromMenuItem.Enabled = enabled;
autoZoomMenuItem.Enabled = enabled;
arrangeGraphsToolStripMenuItem.Enabled = enabled;
// UpdateReplicateMenuItems(enabled);
// CONSIDER: Enable/disable submenus too?
}
else if (!ReferenceEquals(settingsNew.PeptideSettings.Prediction.RetentionTime,
settingsOld.PeptideSettings.Prediction.RetentionTime))
{
// If retention time regression changed, and retention prediction is showing
// update all displayed graphs
if (Settings.Default.ShowRetentionTimePred)
listUpdateGraphs.AddRange(_listGraphChrom.ToArray());
}
} // layoutLock.Dispose()
// Do this after layout is unlocked, because it messes up the selected graph otherwise
if (_sequenceTreeForm == null)
{
ShowSequenceTreeForm(true);
}
// Just about any change could potentially change these panes.
if (settingsNew.HasResults)
{
listUpdateGraphs.AddRange(_listGraphRetentionTime);
listUpdateGraphs.AddRange(_listGraphPeakArea);
listUpdateGraphs.AddRange(_listGraphMassError);
}
UpdateGraphPanes(listUpdateGraphs);
FoldChangeForm.CloseInapplicableForms(this);
}
public void UpdateGraphSpectrumEnabled()
{
using (var layoutLock = new DockPanelLayoutLock(dockPanel))
{
EnableGraphSpectrum(layoutLock, DocumentUI.Settings, false);
}
}
private void EnableGraphSpectrum(DockPanelLayoutLock layoutLock, SrmSettings settings, bool deserialized)
{
bool hasLibraries = settings.PeptideSettings.Libraries.HasLibraries;
bool enable = hasLibraries || PrositHelpers.PrositSettingsValid;
if (enable)
{
UpdateIonTypesMenuItemsVisibility();
}
bool enableChanging = graphsToolStripMenuItem.Enabled != enable;
if (enableChanging)
{
graphsToolStripMenuItem.Enabled = enable;
ionTypesMenuItem.Enabled = enable;
chargesMenuItem.Enabled = enable;
ranksMenuItem.Enabled = enable;
}
// Make sure we don't keep a spectrum graph around because it was
// persisted when Prosit settings were on and they no longer are
if ((enableChanging && !deserialized) || (deserialized && !hasLibraries && !enable))
{
layoutLock.EnsureLocked();
ShowGraphSpectrum(enable && Settings.Default.ShowSpectra);
}
}
private void RemoveGraphChromFromList(GraphChromatogram graphChrom)
{
_listGraphChrom.Remove(graphChrom);
DestroyGraphChrom(graphChrom);
}
// Load view layout from the given stream.
public void LoadLayout(Stream layoutStream)
{
using (new DockPanelLayoutLock(dockPanel, true))
{
LoadLayoutLocked(layoutStream);
}
}
// Load view layout from the given stream.
private void LoadLayoutLocked(Stream layoutStream)
{
// Get rid of any existing graph windows, since the layout
// deserialization has problems using existing windows.
DestroySequenceTreeForm();
DestroyGraphSpectrum();
var type = RTGraphController.GraphType;
_listGraphRetentionTime.ToList().ForEach(DestroyGraphRetentionTime);
RTGraphController.GraphType = type;
type = AreaGraphController.GraphType;
_listGraphPeakArea.ToList().ForEach(DestroyGraphPeakArea);
AreaGraphController.GraphType = type;
type = MassErrorGraphController.GraphType;
_listGraphMassError.ToList().ForEach(DestroyGraphMassError);
MassErrorGraphController.GraphType = type;
type = DetectionsGraphController.GraphType;
_listGraphDetections.ToList().ForEach(DestroyGraphDetections);
DetectionsGraphController.GraphType = type;
FormUtil.OpenForms.OfType<FoldChangeForm>().ForEach(f => f.Close());
DestroyResultsGrid();
DestroyDocumentGrid();
DestroyAuditLogForm();
DestroyCalibrationForm();
DestroyImmediateWindow();
HideFindResults(true);
foreach (GraphChromatogram graphChrom in _listGraphChrom)
DestroyGraphChrom(graphChrom);
_listGraphChrom.Clear();
DestroyGraphFullScan();
dockPanel.LoadFromXml(layoutStream, DeserializeForm);
// SequenceTree resizes often prior to display, so we must restore its scrolling after
// all resizing has occurred
if (SequenceTree != null)
SequenceTree.UpdateTopNode();
EnsureFloatingWindowsVisible();
}
public void DestroyAllChromatogramsGraph()
{
// Remove any multi-progress left in the list
lock (_listProgress)
{
int multiIndex = _listProgress.IndexOf(s => s is MultiProgressStatus);
if (multiIndex != -1)
_listProgress.RemoveAt(multiIndex);
}
if (ImportingResultsWindow != null)
{
ImportingResultsWindow.Close();
ImportingResultsWindow = null;
// Reset progress for the current document
_chromatogramManager.ResetProgress(Document);
}
}
public void InvalidateChromatogramGraphs()
{
_listGraphChrom.ForEach(graph => graph.IsCacheInvalidated = true);
}
private void EnsureFloatingWindowsVisible()
{
if (Program.SkylineOffscreen)
return;
foreach (var floatingWindow in dockPanel.FloatingWindows)
{
var screen = Screen.FromControl(floatingWindow);
var rectScreen = screen.WorkingArea;
var rectWindow = floatingWindow.Bounds;
rectWindow.Width = Math.Min(rectWindow.Width, rectScreen.Width);
rectWindow.Height = Math.Min(rectWindow.Height, rectScreen.Height);
rectWindow.X = Math.Max(rectScreen.X,
Math.Min(rectWindow.X, rectScreen.X + rectScreen.Width - rectWindow.Width));
rectWindow.Y = Math.Max(rectScreen.Y,
Math.Min(rectWindow.Y, rectScreen.Y + rectScreen.Height - rectWindow.Height));
if (!Equals(rectWindow, floatingWindow.Bounds))
floatingWindow.Bounds = rectWindow;
}
}
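/// <summary>
/// Recreates a docked form from its persisted identifier while a saved
/// window layout is being loaded, or returns null if it cannot be restored.
/// </summary>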
private IDockableForm DeserializeForm(string persistentString)
{
if (persistentString.StartsWith(typeof(SequenceTreeForm).ToString()))
{
return _sequenceTreeForm ?? CreateSequenceTreeForm(persistentString);
}
else if (Equals(persistentString, typeof(GraphSpectrum).ToString()))
{
return _graphSpectrum ?? CreateGraphSpectrum();
}
var split = persistentString.Split('|');
var splitLength = split.Length;
// Backward compatibility
if (persistentString.EndsWith(@"Skyline.Controls.GraphRetentionTime") ||
splitLength == 2 && split[0] == typeof(GraphSummary).ToString() &&
split[1] == typeof(RTGraphController).Name)
{
var type = RTGraphController.GraphType;
return _listGraphRetentionTime.FirstOrDefault(g => g.Type == type) ?? CreateGraphRetentionTime(type);
}
// Backward compatibility
if (persistentString.EndsWith(@"Skyline.Controls.GraphPeakArea") ||
splitLength == 2 && split[0] == typeof(GraphSummary).ToString() &&
split[1] == typeof(AreaGraphController).Name)
{
var type = AreaGraphController.GraphType;
return _listGraphPeakArea.FirstOrDefault(g => g.Type == type) ?? CreateGraphPeakArea(type);
}
// Backward compatibility
if (splitLength == 2 && split[0] == typeof(GraphSummary).ToString() &&
split[1] == typeof(MassErrorGraphController).Name)
{
var type = MassErrorGraphController.GraphType;
return _listGraphMassError.FirstOrDefault(g => g.Type == type) ?? CreateGraphMassError(type);
}
if (splitLength == 3 && split[0] == typeof(GraphSummary).ToString())
{
var type = Helpers.ParseEnum(split[2], GraphTypeSummary.invalid);
if (split[1] == typeof(RTGraphController).Name)
return _listGraphRetentionTime.FirstOrDefault(g => g.Type == type) ?? CreateGraphRetentionTime(type);
else if (split[1] == typeof(AreaGraphController).Name)
return _listGraphPeakArea.FirstOrDefault(g => g.Type == type) ?? CreateGraphPeakArea(type);
else if (split[1] == typeof(MassErrorGraphController).Name)
return _listGraphMassError.FirstOrDefault(g => g.Type == type) ?? CreateGraphMassError(type);
else
return null;
}
if (Equals(persistentString, typeof(ResultsGridForm).ToString()) || Equals(persistentString, typeof (LiveResultsGrid).ToString()))
{
return _resultsGridForm ?? CreateResultsGrid();
}
if (Equals(persistentString, typeof (DocumentGridForm).ToString()))
{
return _documentGridForm ?? CreateDocumentGrid();
}
if (Equals(persistentString, typeof (CalibrationForm).ToString()))
{
return _calibrationForm ?? CreateCalibrationForm();
}
if (Equals(persistentString, typeof(AuditLogForm).ToString()))
{
return _auditLogForm ?? CreateAuditLogForm();
}
if (Equals(persistentString, typeof(ImmediateWindow).ToString()))
{
return _immediateWindow ?? CreateImmediateWindow();
}
if (persistentString.StartsWith(typeof(GraphChromatogram).ToString()))
{
if (_listGraphChrom.Count >= MAX_GRAPH_CHROM)
{
return null;
}
string name = GraphChromatogram.GetTabText(persistentString);
var settings = DocumentUI.Settings;
if (settings.HasResults)
{
bool hasName = settings.MeasuredResults.ContainsChromatogram(name);
// For tests with persisted layouts containing the default chromatogram name
// check for the default name in the current language
if (!hasName && Equals(name, Resources.ResourceManager.GetString(
@"ImportResultsDlg_DefaultNewName_Default_Name", CultureInfo.InvariantCulture)))
{
name = Resources.ImportResultsDlg_DefaultNewName_Default_Name;
hasName = settings.MeasuredResults.ContainsChromatogram(name);
}
if (hasName)
return GetGraphChrom(name) ?? CreateGraphChrom(name);
}
}
var foldChangeForm = FoldChangeForm.RestoreFoldChangeForm(this, persistentString);
if (null != foldChangeForm)
{
return foldChangeForm;
}
if (Equals(persistentString, typeof(GraphFullScan).ToString()))
{
return _graphFullScan ?? CreateGraphFullScan();
}
return null;
}
// Disabling these menuitems allows the normal meaning of Ctrl-Up/Down
// to cause scrolling in the tree view.
// private void UpdateReplicateMenuItems(bool hasResults)
// {
// nextReplicateMenuItem.Enabled = hasResults && comboResults.SelectedIndex < comboResults.Items.Count - 1;
// previousReplicateMenuItem.Enabled = hasResults && comboResults.SelectedIndex > 0;
// }
public void UpdateGraphPanes()
{
// Add only visible graphs to the update list, since each update
// must pass through the Windows message queue on a WM_TIMER.
var listVisibleChrom = from graphChrom in _listGraphChrom
where graphChrom.Visible
select graphChrom;
var listUpdateGraphs = new List<IUpdatable>(listVisibleChrom.ToArray());
if (_graphSpectrum != null && _graphSpectrum.Visible)
listUpdateGraphs.Add(_graphSpectrum);
listUpdateGraphs.AddRange(_listGraphRetentionTime.Where(g => g.Visible));
listUpdateGraphs.AddRange(_listGraphPeakArea.Where(g => g.Visible));
listUpdateGraphs.AddRange(_listGraphMassError.Where(g => g.Visible));
if (_calibrationForm != null && _calibrationForm.Visible)
listUpdateGraphs.Add(_calibrationForm);
UpdateGraphPanes(listUpdateGraphs);
}
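/// <summary>
/// Schedules the given graph panes for a deferred update on the graph timer,
/// merging them with any panes already waiting to be updated.
/// </summary>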
private void UpdateGraphPanes(ICollection<IUpdatable> graphPanes)
{
if (graphPanes.Count == 0)
return;
// Restart the timer at 100ms, giving the UI time to interrupt.
_timerGraphs.Stop();
_timerGraphs.Interval = 100;
var previousGraphPanes = _timerGraphs.Tag as ICollection<IUpdatable>;
if (previousGraphPanes != null && previousGraphPanes.Count > 0)
{
_timerGraphs.Tag = previousGraphPanes.Concat(graphPanes).Distinct().ToList();
}
else
{
_timerGraphs.Tag = graphPanes;
}
_timerGraphs.Start();
}
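/// <summary>
/// Timer tick handler that updates a single pending graph pane per tick,
/// keeping the UI responsive while a long list of panes is refreshed.
/// </summary>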
private void UpdateGraphPanes(object sender, EventArgs e)
{
// Stop the timer immediately, to keep from getting called again
// for the same triggering event.
_timerGraphs.Stop();
IList<IUpdatable> listGraphPanes = (IList<IUpdatable>)_timerGraphs.Tag;
int count = 0;
if (listGraphPanes != null && listGraphPanes.Count > 0)
{
// Allow nulls in the list
while (listGraphPanes.Count > 0 && listGraphPanes[0] == null)
listGraphPanes.RemoveAt(0);
if (listGraphPanes.Count > 0)
{
listGraphPanes[0].UpdateUI();
listGraphPanes.RemoveAt(0);
}
count = listGraphPanes.Count;
}
if (count != 0)
{
// If more graphs to update, update them as quickly as possible.
_timerGraphs.Interval = 1;
_timerGraphs.Start();
}
}
/// <summary>
/// Returns true if the graph panels still need to be updated to show the current selection.
/// Used for testing.
/// </summary>
public bool IsGraphUpdatePending
{
get
{
// The implementation of "UpdateGraphs" is such that this question should only be
// asked on the event thread
if (InvokeRequired)
{
throw new InvalidOperationException(
Resources.SkylineWindow_IsGraphUpdatePending_Must_be_called_from_event_thread);
}
if (_timerGraphs.Enabled)
{
return true;
}
return false;
}
}
public ChromFileInfoId AlignToFile
{
get { return _alignToFile; }
set
{
if (ReferenceEquals(value, AlignToFile))
{
return;
}
_alignToFile = value;
UpdateGraphPanes();
}
}
public bool AlignToRtPrediction
{
get { return null == AlignToFile && _alignToPrediction; }
set
{
if (value == AlignToRtPrediction)
{
return;
}
_alignToPrediction = value;
if (_alignToPrediction)
{
_alignToFile = null;
}
UpdateGraphPanes();
}
}
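/// <summary>
/// Returns the retention time transform implied by the current alignment
/// choice (file alignment or auto-calculated regression), or null for none.
/// </summary>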
public GraphValues.IRetentionTimeTransformOp GetRetentionTimeTransformOperation()
{
if (null != AlignToFile)
{
return GraphValues.AlignToFileOp.GetAlignmentToFile(AlignToFile, Document.Settings);
}
if (AlignToRtPrediction)
{
// Only align to regressions that are auto-calculated. Otherwise,
// conversion will be the same for all replicates, making this just
// a linear unit conversion
var predictRT = DocumentUI.Settings.PeptideSettings.Prediction.RetentionTime;
if (predictRT != null && predictRT.IsAutoCalculated)
{
return new GraphValues.RegressionUnconversion(predictRT);
}
}
return null;
}
#region Spectrum graph
public GraphSpectrum GraphSpectrum { get { return _graphSpectrum; } }
public GraphSpectrumSettings GraphSpectrumSettings { get { return _graphSpectrumSettings; } }
private void aMenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowAIons = !_graphSpectrumSettings.ShowAIons;
}
private void bMenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowBIons = !_graphSpectrumSettings.ShowBIons;
}
private void cMenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowCIons = !_graphSpectrumSettings.ShowCIons;
}
private void xMenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowXIons = !_graphSpectrumSettings.ShowXIons;
}
private void yMenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowYIons = !_graphSpectrumSettings.ShowYIons;
}
private void zMenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowZIons = !_graphSpectrumSettings.ShowZIons;
}
private void fragmentsMenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowFragmentIons = !_graphSpectrumSettings.ShowFragmentIons;
}
private void precursorIonMenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowPrecursorIon = !_graphSpectrumSettings.ShowPrecursorIon;
}
private void ionTypesMenuItem_DropDownOpening(object sender, EventArgs e)
{
var set = Settings.Default;
aMenuItem.Checked = aionsContextMenuItem.Checked = set.ShowAIons;
bMenuItem.Checked = bionsContextMenuItem.Checked = set.ShowBIons;
cMenuItem.Checked = cionsContextMenuItem.Checked = set.ShowCIons;
xMenuItem.Checked = xionsContextMenuItem.Checked = set.ShowXIons;
yMenuItem.Checked = yionsContextMenuItem.Checked = set.ShowYIons;
zMenuItem.Checked = zionsContextMenuItem.Checked = set.ShowZIons;
fragmentsMenuItem.Checked = fragmentionsContextMenuItem.Checked = set.ShowFragmentIons;
precursorIonMenuItem.Checked = precursorIonContextMenuItem.Checked = set.ShowPrecursorIon;
UpdateIonTypesMenuItemsVisibility();
}
// Update the Ion Types menu for document contents
private void UpdateIonTypesMenuItemsVisibility()
{
aMenuItem.Visible = bMenuItem.Visible = cMenuItem.Visible =
xMenuItem.Visible = yMenuItem.Visible = zMenuItem.Visible =
DocumentUI.DocumentType != SrmDocument.DOCUMENT_TYPE.small_molecules;
fragmentsMenuItem.Visible = DocumentUI.HasSmallMolecules;
}
private void charge1MenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowCharge1 = !_graphSpectrumSettings.ShowCharge1;
}
private void charge2MenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowCharge2 = !_graphSpectrumSettings.ShowCharge2;
}
private void charge3MenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowCharge3 = !_graphSpectrumSettings.ShowCharge3;
}
private void charge4MenuItem_Click(object sender, EventArgs e)
{
_graphSpectrumSettings.ShowCharge4 = !_graphSpectrumSettings.ShowCharge4;
}
private void chargesMenuItem_DropDownOpening(object sender, EventArgs e)
{
var set = _graphSpectrumSettings;
charge1MenuItem.Checked = charge1ContextMenuItem.Checked = set.ShowCharge1;
charge2MenuItem.Checked = charge2ContextMenuItem.Checked = set.ShowCharge2;
charge3MenuItem.Checked = charge3ContextMenuItem.Checked = set.ShowCharge3;
charge4MenuItem.Checked = charge4ContextMenuItem.Checked = set.ShowCharge4;
}
private void editToolStripMenuItem_DropDownOpening(object sender, EventArgs e)
{
CanApplyOrRemovePeak(null, null, out var canApply, out var canRemove);
if (!canApply && !canRemove)
{
integrationToolStripMenuItem.Enabled = false;
}
else
{
applyPeakAllToolStripMenuItem.Enabled = applyPeakSubsequentToolStripMenuItem.Enabled = canApply;
applyPeakGroupToolStripMenuItem.Text = Resources.SkylineWindow_editToolStripMenuItem_DropDownOpening_Apply_Peak_to_Group;
groupApplyToByToolStripMenuItem.DropDownItems.Clear();
applyPeakGroupToolStripMenuItem.Enabled = groupApplyToByToolStripMenuItem.Enabled = false;
if (ReplicateValue.GetGroupableReplicateValues(DocumentUI).Any())
{
groupApplyToByToolStripMenuItem.Enabled = true;
var selectedAnnotation = GetGroupApplyToDescription();
if (selectedAnnotation != null)
{
applyPeakGroupToolStripMenuItem.Text = Resources.SkylineWindow_BuildChromatogramMenu_Apply_Peak_to_ + selectedAnnotation;
applyPeakGroupToolStripMenuItem.Enabled = true;
}
var i = 0;
AddGroupByMenuItems(null, groupApplyToByToolStripMenuItem, replicateValue=> Settings.Default.GroupApplyToBy = replicateValue?.ToPersistedString(), false, Settings.Default.GroupApplyToBy, ref i);
groupApplyToByGraphMenuItem.Visible = true;
}
else
{
groupApplyToByGraphMenuItem.Visible = false;
}
removePeakToolStripMenuItem.Enabled = canRemove;
integrationToolStripMenuItem.Enabled = true;
}
}
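/// <summary>
/// Inserts the apply/remove peak items into a chromatogram context menu,
/// based on what the current selection allows.
/// </summary>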
private void AddApplyRemovePeak(ToolStrip menuStrip, ToolStripItemCollection removePeakItems, IsotopeLabelType labelType, int separator, ref int iInsert)
{
var document = DocumentUI;
CanApplyOrRemovePeak(removePeakItems, labelType, out var canApply, out var canRemove);
if (canApply || canRemove)
{
if (separator < 0)
menuStrip.Items.Insert(iInsert++, toolStripSeparator33);
if (canApply)
{
menuStrip.Items.Insert(iInsert++, applyPeakAllGraphMenuItem);
menuStrip.Items.Insert(iInsert++, applyPeakSubsequentGraphMenuItem);
var groupable = ReplicateValue.GetGroupableReplicateValues(document).ToArray();
if (groupable.Any())
{
var groupBy = GetGroupApplyToDescription();
if (groupBy != null)
{
applyPeakGroupGraphMenuItem.Text =
Resources.SkylineWindow_BuildChromatogramMenu_Apply_Peak_to_ + groupBy;
menuStrip.Items.Insert(iInsert++, applyPeakGroupGraphMenuItem);
}
AddGroupByMenuItems(menuStrip, groupApplyToByGraphMenuItem, SetGroupApplyToBy, false, Settings.Default.GroupApplyToBy, ref iInsert);
groupApplyToByGraphMenuItem.Visible = true;
}
else
{
groupApplyToByGraphMenuItem.Visible = false;
}
}
if (canRemove)
menuStrip.Items.Insert(iInsert++, removePeakGraphMenuItem);
if (separator > 0)
menuStrip.Items.Insert(iInsert++, toolStripSeparator33);
}
}
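/// <summary>
/// Determines whether the current tree selection supports applying or removing
/// a peak, optionally populating the remove-peak submenu items.
/// </summary>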
private void CanApplyOrRemovePeak(ToolStripItemCollection removePeakItems, IsotopeLabelType labelType, out bool canApply, out bool canRemove)
{
canApply = canRemove = false;
if (!DocumentUI.Settings.HasResults)
return;
var selectedTreeNode = SelectedNode as SrmTreeNode;
var displayType = GraphChromatogram.GetDisplayType(DocumentUI, selectedTreeNode);
if (displayType == DisplayTypeChrom.base_peak || displayType == DisplayTypeChrom.tic || displayType == DisplayTypeChrom.qc)
return;
var chromFileInfoId = GetSelectedChromFileId();
var node = selectedTreeNode as TransitionTreeNode;
if (node != null && GraphChromatogram.IsSingleTransitionDisplay)
{
if (HasPeak(SelectedResultsIndex, chromFileInfoId, node.DocNode))
{
if (removePeakItems != null)
removePeakItems.Add(new ToolStripMenuItem());
canApply = canRemove = true;
}
}
else if (selectedTreeNode is TransitionTreeNode && displayType == DisplayTypeChrom.all ||
selectedTreeNode is TransitionGroupTreeNode ||
selectedTreeNode is PeptideTreeNode treeNode && treeNode.DocNode.Children.Any())
{
canApply = true;
var nodeGroupTree = SequenceTree.GetNodeOfType<TransitionGroupTreeNode>();
var hasPeak = nodeGroupTree != null
? HasPeak(SelectedResultsIndex, chromFileInfoId, nodeGroupTree.DocNode)
: SequenceTree.GetNodeOfType<PeptideTreeNode>().DocNode.TransitionGroups.Any(tranGroup => HasPeak(SelectedResultsIndex, chromFileInfoId, tranGroup));
if (hasPeak)
{
removePeakItems?.Clear();
canRemove = true;
// Remove [IsotopeLabelType]
if (removePeakItems != null && labelType != null)
{
// only if multiple isotope label types
if (selectedTreeNode is PeptideTreeNode peptideTreeNode &&
peptideTreeNode.DocNode.TransitionGroups.Select(nodeTranGroup => nodeTranGroup.TransitionGroup.LabelType).Distinct().Count() > 1)
{
removePeakItems.Add(new ToolStripMenuItem { Tag = labelType });
}
}
}
}
}
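/// <summary>
/// Returns the file id shown in the chromatogram graph for the selected
/// replicate, or null if there are no results or no matching graph.
/// </summary>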
public ChromFileInfoId GetSelectedChromFileId()
{
var document = Document;
if (!document.Settings.HasResults || SelectedResultsIndex < 0 || SelectedResultsIndex >= document.Settings.MeasuredResults.Chromatograms.Count)
{
return null;
}
var graphChrom = GetGraphChrom(Document.MeasuredResults.Chromatograms[SelectedResultsIndex].Name);
if (graphChrom == null)
{
return null;
}
return graphChrom.GetChromFileInfoId();
}
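/// <summary>
/// Returns a "Title:value" description of the replicate annotation currently
/// chosen for group apply-to, or null if no grouping is selected.
/// </summary>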
public string GetGroupApplyToDescription()
{
var document = Document;
var groupBy = ReplicateValue.FromPersistedString(document.Settings, Settings.Default.GroupApplyToBy);
if (groupBy == null)
{
return null;
}
object value = null;
if (document.Settings.HasResults)
{
int replicateIndex = SelectedResultsIndex;
if (replicateIndex >= 0 && replicateIndex < document.Settings.MeasuredResults.Chromatograms.Count)
{
var chromatogramSet = document.Settings.MeasuredResults.Chromatograms[replicateIndex];
value = groupBy.GetValue(new AnnotationCalculator(document), chromatogramSet);
}
}
return groupBy.Title + ':' + value;
}
private void ranksMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowRanks = !Settings.Default.ShowRanks;
UpdateSpectrumGraph(false);
}
private void scoresContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowLibraryScores = !Settings.Default.ShowLibraryScores;
UpdateSpectrumGraph(false);
}
private void ionMzValuesContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowIonMz = !Settings.Default.ShowIonMz;
UpdateSpectrumGraph(false);
}
private void observedMzValuesContextMenuItem_Click(object sender, EventArgs e)
{
ToggleObservedMzValues();
}
public void ToggleObservedMzValues()
{
Settings.Default.ShowObservedMz = !Settings.Default.ShowObservedMz;
UpdateSpectrumGraph(false);
}
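/// <summary>
/// Builds the right-click menu for the spectrum graph, inserting the Skyline
/// items ahead of the standard ZedGraph items and removing those not wanted.
/// </summary>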
void GraphSpectrum.IStateProvider.BuildSpectrumMenu(bool isProteomic, ZedGraphControl zedGraphControl, ContextMenuStrip menuStrip)
{
// Store original menuitems in an array, and insert a separator
ToolStripItem[] items = new ToolStripItem[menuStrip.Items.Count];
int iUnzoom = -1;
for (int i = 0; i < items.Length; i++)
{
items[i] = menuStrip.Items[i];
string tag = (string)items[i].Tag;
if (tag == @"unzoom")
iUnzoom = i;
}
if (iUnzoom != -1)
menuStrip.Items.Insert(iUnzoom, toolStripSeparator27);
// Insert skyline specific menus
var set = Settings.Default;
int iInsert = 0;
if (isProteomic)
{
aionsContextMenuItem.Checked = set.ShowAIons;
menuStrip.Items.Insert(iInsert++, aionsContextMenuItem);
bionsContextMenuItem.Checked = set.ShowBIons;
menuStrip.Items.Insert(iInsert++, bionsContextMenuItem);
cionsContextMenuItem.Checked = set.ShowCIons;
menuStrip.Items.Insert(iInsert++, cionsContextMenuItem);
xionsContextMenuItem.Checked = set.ShowXIons;
menuStrip.Items.Insert(iInsert++, xionsContextMenuItem);
yionsContextMenuItem.Checked = set.ShowYIons;
menuStrip.Items.Insert(iInsert++, yionsContextMenuItem);
zionsContextMenuItem.Checked = set.ShowZIons;
menuStrip.Items.Insert(iInsert++, zionsContextMenuItem);
}
else
{
fragmentionsContextMenuItem.Checked = set.ShowFragmentIons;
menuStrip.Items.Insert(iInsert++, fragmentionsContextMenuItem);
}
precursorIonContextMenuItem.Checked = set.ShowPrecursorIon;
menuStrip.Items.Insert(iInsert++, precursorIonContextMenuItem);
menuStrip.Items.Insert(iInsert++, toolStripSeparator11);
menuStrip.Items.Insert(iInsert++, chargesContextMenuItem);
if (chargesContextMenuItem.DropDownItems.Count == 0)
{
chargesContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
charge1ContextMenuItem,
charge2ContextMenuItem,
charge3ContextMenuItem,
charge4ContextMenuItem,
});
}
menuStrip.Items.Insert(iInsert++, toolStripSeparator12);
ranksContextMenuItem.Checked = set.ShowRanks;
menuStrip.Items.Insert(iInsert++, ranksContextMenuItem);
scoreContextMenuItem.Checked = set.ShowLibraryScores;
menuStrip.Items.Insert(iInsert++, scoreContextMenuItem);
ionMzValuesContextMenuItem.Checked = set.ShowIonMz;
menuStrip.Items.Insert(iInsert++, ionMzValuesContextMenuItem);
observedMzValuesContextMenuItem.Checked = set.ShowObservedMz;
menuStrip.Items.Insert(iInsert++, observedMzValuesContextMenuItem);
duplicatesContextMenuItem.Checked = set.ShowDuplicateIons;
menuStrip.Items.Insert(iInsert++, duplicatesContextMenuItem);
menuStrip.Items.Insert(iInsert++, toolStripSeparator13);
lockYaxisContextMenuItem.Checked = set.LockYAxis;
menuStrip.Items.Insert(iInsert++, lockYaxisContextMenuItem);
menuStrip.Items.Insert(iInsert++, toolStripSeparator14);
// CONSIDER: needs testing with small molecule documents
if (isProteomic)
{
prositLibMatchItem.Checked = Settings.Default.Prosit;
menuStrip.Items.Insert(iInsert++, prositLibMatchItem);
mirrorMenuItem.Checked = Settings.Default.LibMatchMirror;
menuStrip.Items.Insert(iInsert++, mirrorMenuItem);
menuStrip.Items.Insert(iInsert++, toolStripSeparator61);
}
menuStrip.Items.Insert(iInsert++, spectrumPropsContextMenuItem);
showLibraryChromatogramsSpectrumContextMenuItem.Checked = set.ShowLibraryChromatograms;
menuStrip.Items.Insert(iInsert++, showLibraryChromatogramsSpectrumContextMenuItem);
menuStrip.Items.Insert(iInsert, toolStripSeparator15);
// Remove some ZedGraph menu items not of interest
foreach (var item in items)
{
string tag = (string)item.Tag;
if (tag == @"set_default" || tag == @"show_val")
menuStrip.Items.Remove(item);
}
CopyEmfToolStripMenuItem.AddToContextMenu(zedGraphControl, menuStrip);
}
private void duplicatesContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowDuplicateIons = duplicatesContextMenuItem.Checked;
UpdateSpectrumGraph(false);
}
private void lockYaxisContextMenuItem_Click(object sender, EventArgs e)
{
// Avoid updating the rest of the graph just to change the y-axis lock state
_graphSpectrum.LockYAxis(Settings.Default.LockYAxis = lockYaxisContextMenuItem.Checked);
}
private void showChromatogramsSpectrumContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowLibraryChromatograms = !Settings.Default.ShowLibraryChromatograms;
UpdateGraphPanes();
}
private void spectrumPropsContextMenuItem_Click(object sender, EventArgs e)
{
ShowSpectrumProperties();
}
public void ShowSpectrumProperties()
{
using (var dlg = new SpectrumChartPropertyDlg())
{
if (dlg.ShowDialog(this) == DialogResult.OK)
UpdateSpectrumGraph(false);
}
}
private void zoomSpectrumContextMenuItem_Click(object sender, EventArgs e)
{
if (_graphSpectrum != null)
_graphSpectrum.ZoomSpectrumToSettings();
}
public void ShowGraphSpectrum(bool show)
{
if (show)
{
if (_graphSpectrum != null)
{
_graphSpectrum.Show(dockPanel, DockState.DockRight);
_graphSpectrum.Focus();
}
else
{
_graphSpectrum = CreateGraphSpectrum();
_graphSpectrum.Show(dockPanel, DockState.DockRight);
}
}
else if (_graphSpectrum != null)
{
// Save current setting for showing spectra
show = Settings.Default.ShowSpectra;
// Close the spectrum graph window
_graphSpectrum.Hide();
// Restore setting and menuitem from saved value
Settings.Default.ShowSpectra = show;
}
}
// Testing
public bool IsGraphSpectrumVisible
{
get { return _graphSpectrum != null && _graphSpectrum.Visible; }
}
private GraphSpectrum CreateGraphSpectrum()
{
// Create a new spectrum graph
_graphSpectrum = new GraphSpectrum(this);
_graphSpectrum.UpdateUI();
_graphSpectrum.FormClosed += graphSpectrum_FormClosed;
_graphSpectrum.VisibleChanged += graphSpectrum_VisibleChanged;
_graphSpectrum.SelectedSpectrumChanged += graphSpectrum_SelectedSpectrumChanged;
return _graphSpectrum;
}
private void DestroyGraphSpectrum()
{
if (_graphSpectrum != null)
{
_graphSpectrum.FormClosed -= graphSpectrum_FormClosed;
_graphSpectrum.VisibleChanged -= graphSpectrum_VisibleChanged;
_graphSpectrum.SelectedSpectrumChanged -= graphSpectrum_SelectedSpectrumChanged;
_graphSpectrum.HideOnClose = false;
_graphSpectrum.Close();
_graphSpectrum = null;
}
}
private void graphSpectrum_VisibleChanged(object sender, EventArgs e)
{
if (_graphSpectrum != null)
Settings.Default.ShowSpectra = _graphSpectrum.Visible;
}
private void graphSpectrum_FormClosed(object sender, FormClosedEventArgs e)
{
// Update settings and menu check
Settings.Default.ShowSpectra = false;
_graphSpectrum = null;
}
private void graphSpectrum_SelectedSpectrumChanged(object sender, SelectedSpectrumEventArgs e)
{
// Might need to update the selected MS/MS spectrum, if full-scan
// filtering was used.
if (DocumentUI.Settings.HasResults &&
(DocumentUI.Settings.TransitionSettings.FullScan.IsEnabled || DocumentUI.Settings.PeptideSettings.Libraries.HasMidasLibrary))
{
if (e.Spectrum != null && e.IsUserAction)
{
// Activate the selected replicate, if there is one associated with
// the selected spectrum.
string replicateName = e.Spectrum.ReplicateName;
if (!string.IsNullOrEmpty(replicateName))
{
int resultsIndex = DocumentUI.Settings.MeasuredResults.Chromatograms.IndexOf(chrom =>
Equals(replicateName, chrom.Name));
if (resultsIndex != -1)
SelectedResultsIndex = resultsIndex;
}
}
UpdateChromGraphs();
}
}
private void UpdateSpectrumGraph(bool selectionChanged)
{
if (_graphSpectrum != null)
_graphSpectrum.UpdateUI(selectionChanged);
}
// private static bool SameChargeGroups(PeptideTreeNode nodeTree)
// {
// // Check to see if all transition groups under a peptide tree node
// // have the same precursor charge.
// int charge = 0;
// foreach (TransitionGroupDocNode nodeGroup in nodeTree.DocNode.Children)
// {
// if (charge == 0)
// charge = nodeGroup.TransitionGroup.PrecursorCharge;
// else if (charge != nodeGroup.TransitionGroup.PrecursorCharge)
// return false;
// }
// // True only if there was at least one group
// return (charge != 0);
// }
IList<IonType> GraphSpectrum.IStateProvider.ShowIonTypes(bool isProteomic)
{
return _graphSpectrumSettings.ShowIonTypes(isProteomic);
}
private void CheckIonTypes(IEnumerable<IonType> types, bool check, bool visible)
{
foreach (var type in types)
CheckIonType(type, check, visible);
}
private void CheckIonType(IonType type, bool check, bool visible)
{
var set = Settings.Default;
switch (type)
{
case IonType.a: set.ShowAIons = aMenuItem.Checked = check; aMenuItem.Visible = visible; break;
case IonType.b: set.ShowBIons = bMenuItem.Checked = check; bMenuItem.Visible = visible; break;
case IonType.c: set.ShowCIons = cMenuItem.Checked = check; cMenuItem.Visible = visible; break;
case IonType.x: set.ShowXIons = xMenuItem.Checked = check; xMenuItem.Visible = visible; break;
case IonType.y: set.ShowYIons = yMenuItem.Checked = check; yMenuItem.Visible = visible; break;
case IonType.z: set.ShowZIons = zMenuItem.Checked = check; zMenuItem.Visible = visible; break;
case IonType.custom: set.ShowFragmentIons = fragmentsMenuItem.Checked = check; fragmentsMenuItem.Visible = visible; break;
}
}
// N.B. we're interested in the absolute value of charge here, so output list may be shorter than input list
// CONSIDER(bspratt): we may want finer adduct-level control for small molecule use
IList<int> GraphSpectrum.IStateProvider.ShowIonCharges(IEnumerable<Adduct> adductPriority)
{
return _graphSpectrumSettings.ShowIonCharges(adductPriority);
}
private void CheckIonCharges(IEnumerable<Adduct> charges, bool check)
{
foreach (var charge in charges)
CheckIonCharge(charge, check);
}
private void CheckIonCharge(Adduct adduct, bool check)
{
// Set charge settings without causing UI to update
var set = Settings.Default;
switch (Math.Abs(adduct.AdductCharge)) // TODO(bspratt) - need a lot more flexibility here, neg charges, M+Na etc
{
case 1: set.ShowCharge1 = charge1MenuItem.Checked = check; break;
case 2: set.ShowCharge2 = charge2MenuItem.Checked = check; break;
case 3: set.ShowCharge3 = charge3MenuItem.Checked = check; break;
case 4: set.ShowCharge4 = charge4MenuItem.Checked = check; break;
}
}
public void HideFullScanGraph()
{
if (_graphFullScan != null)
_graphFullScan.Hide();
}
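/// <summary>
/// Shows the full-scan graph, creating and floating it if necessary, and
/// displays the requested scan from the given scan provider.
/// </summary>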
private void ShowGraphFullScan(IScanProvider scanProvider, int transitionIndex, int scanIndex)
{
if (_graphFullScan != null)
{
_graphFullScan.Activate();
_graphFullScan.Focus();
}
else
{
_graphFullScan = CreateGraphFullScan();
// Choose a position to float the window
var rectFloat = GetFloatingRectangleForNewWindow();
_graphFullScan.Show(dockPanel, rectFloat);
}
_graphFullScan.ShowSpectrum(scanProvider, transitionIndex, scanIndex);
}
// Testing
public GraphFullScan GraphFullScan
{
get { return _graphFullScan; }
}
private GraphFullScan CreateGraphFullScan()
{
// Create a new full-scan graph
_graphFullScan = new GraphFullScan(this);
_graphFullScan.UpdateUI();
_graphFullScan.FormClosed += graphFullScan_FormClosed;
_graphFullScan.VisibleChanged += graphFullScan_VisibleChanged;
_graphFullScan.SelectedScanChanged += graphFullScan_SelectedScanChanged;
return _graphFullScan;
}
private void DestroyGraphFullScan()
{
if (_graphFullScan != null)
{
_graphFullScan.FormClosed -= graphFullScan_FormClosed;
_graphFullScan.VisibleChanged -= graphFullScan_VisibleChanged;
_graphFullScan.SelectedScanChanged -= graphFullScan_SelectedScanChanged;
_graphFullScan.HideOnClose = false;
_graphFullScan.Close();
_graphFullScan = null;
}
}
private void graphFullScan_VisibleChanged(object sender, EventArgs e)
{
if (_graphFullScan != null)
Settings.Default.ShowFullScan = _graphFullScan.Visible;
}
private void graphFullScan_FormClosed(object sender, FormClosedEventArgs e)
{
// Update settings and menu check
Settings.Default.ShowFullScan = false;
_graphFullScan = null;
}
private void graphFullScan_SelectedScanChanged(object sender, SelectedScanEventArgs e)
{
SelectedScanFile = e.DataFile;
SelectedScanRetentionTime = e.RetentionTime;
SelectedScanTransition = e.TransitionId;
UpdateChromGraphs();
}
#endregion
#region Chromatogram graphs
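// Rebuilds the replicate items at the top of the Chromatograms menu so they
// match the chromatogram sets currently in the document.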
private void chromatogramsMenuItem_DropDownOpening(object sender, EventArgs e)
{
ToolStripMenuItem menu = chromatogramsMenuItem;
if (!DocumentUI.Settings.HasResults)
{
// Strange problem in .NET where a dropdown will show when
// its menuitem is disabled.
chromatogramsMenuItem.HideDropDown();
return;
}
// If MeasuredResults is null, then this menuitem is incorrectly enabled
var chromatograms = DocumentUI.Settings.MeasuredResults.Chromatograms;
int i = 0;
menu.DropDown.SuspendLayout();
try
{
foreach (var chrom in chromatograms)
{
string name = chrom.Name;
ToolStripMenuItem item = null;
if (i < menu.DropDownItems.Count)
item = menu.DropDownItems[i] as ToolStripMenuItem;
if (item == null || name != item.Name)
{
// Remove the rest of the existing items
while (!ReferenceEquals(menu.DropDownItems[i], toolStripSeparatorReplicates))
menu.DropDownItems.RemoveAt(i);
ShowChromHandler handler = new ShowChromHandler(this, chrom.Name);
item = new ToolStripMenuItem(chrom.Name, null,
handler.menuItem_Click);
menu.DropDownItems.Insert(i, item);
}
i++;
}
// Remove the rest of the existing items
while (!ReferenceEquals(menu.DropDownItems[i], toolStripSeparatorReplicates))
menu.DropDownItems.RemoveAt(i);
}
finally
{
menu.DropDown.ResumeLayout();
}
closeChromatogramMenuItem.Enabled = !string.IsNullOrEmpty(SelectedGraphChromName);
}
private class ShowChromHandler
{
private readonly SkylineWindow _skyline;
private readonly string _nameChromatogram;
public ShowChromHandler(SkylineWindow skyline, string nameChromatogram)
{
_skyline = skyline;
_nameChromatogram = nameChromatogram;
}
public void menuItem_Click(object sender, EventArgs e)
{
_skyline.ShowGraphChrom(_nameChromatogram, true);
}
}
public PeptideGraphInfo GetPeptideGraphInfo(DocNode docNode)
{
return SequenceTree.GetPeptideGraphInfo(docNode);
}
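/// <summary>
/// Builds the right-click menu for a chromatogram graph pane, inserting the
/// Skyline items for peak integration, retention time annotations, transitions,
/// transforms and zooming ahead of the standard ZedGraph items.
/// </summary>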
void GraphChromatogram.IStateProvider.BuildChromatogramMenu(ZedGraphControl zedGraphControl, PaneKey paneKey, ContextMenuStrip menuStrip, ChromFileInfoId chromFileInfoId)
{
// Store original menuitems in an array, and insert a separator
ToolStripItem[] items = new ToolStripItem[menuStrip.Items.Count];
int iUnzoom = -1;
for (int i = 0; i < items.Length; i++)
{
items[i] = menuStrip.Items[i];
string tag = (string)items[i].Tag;
if (tag == @"unzoom")
iUnzoom = i;
}
if (iUnzoom != -1)
menuStrip.Items.Insert(iUnzoom, toolStripSeparator26);
// Insert skyline specific menus
var set = Settings.Default;
int iInsert = 0;
var settings = DocumentUI.Settings;
bool retentionPredict = (settings.PeptideSettings.Prediction.RetentionTime != null);
bool peptideIdTimes = (settings.PeptideSettings.Libraries.HasLibraries &&
(settings.TransitionSettings.FullScan.IsEnabled || settings.PeptideSettings.Libraries.HasMidasLibrary));
AddApplyRemovePeak(menuStrip, removePeakGraphMenuItem.DropDownItems, paneKey.IsotopeLabelType, 1, ref iInsert);
legendChromContextMenuItem.Checked = set.ShowChromatogramLegend;
menuStrip.Items.Insert(iInsert++, legendChromContextMenuItem);
var fullScan = Document.Settings.TransitionSettings.FullScan;
if (ChromatogramCache.FORMAT_VERSION_CACHE > ChromatogramCache.FORMAT_VERSION_CACHE_4
&& fullScan.IsEnabled
&& (fullScan.IsHighResPrecursor || fullScan.IsHighResProduct))
{
massErrorContextMenuItem.Checked = set.ShowMassError;
menuStrip.Items.Insert(iInsert++, massErrorContextMenuItem);
}
peakBoundariesContextMenuItem.Checked = set.ShowPeakBoundaries;
menuStrip.Items.Insert(iInsert++, peakBoundariesContextMenuItem);
originalPeakMenuItem.Checked = set.ShowOriginalPeak;
menuStrip.Items.Insert(iInsert++, originalPeakMenuItem);
menuStrip.Items.Insert(iInsert++, retentionTimesContextMenuItem);
if (retentionTimesContextMenuItem.DropDownItems.Count == 0)
{
retentionTimesContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
allRTContextMenuItem,
bestRTContextMenuItem,
thresholdRTContextMenuItem,
noneRTContextMenuItem,
rawTimesMenuItemSplitter,
rawTimesContextMenuItem
});
}
if (retentionPredict)
{
retentionTimePredContextMenuItem.Checked = set.ShowRetentionTimePred;
menuStrip.Items.Insert(iInsert++, retentionTimePredContextMenuItem);
}
rawTimesContextMenuItem.Checked = set.ChromShowRawTimes;
bool alignedTimes = settings.HasAlignedTimes();
bool unalignedTimes = settings.HasUnalignedTimes();
if (peptideIdTimes || alignedTimes || unalignedTimes)
{
menuStrip.Items.Insert(iInsert++, peptideIDTimesContextMenuItem);
peptideIDTimesContextMenuItem.DropDownItems.Clear();
idTimesNoneContextMenuItem.Checked = false;
peptideIDTimesContextMenuItem.DropDownItems.Add(idTimesNoneContextMenuItem);
if (peptideIdTimes)
{
idTimesMatchingContextMenuItem.Checked = set.ShowPeptideIdTimes;
peptideIDTimesContextMenuItem.DropDownItems.Add(idTimesMatchingContextMenuItem);
}
if (settings.HasAlignedTimes())
{
idTimesAlignedContextMenuItem.Checked = set.ShowAlignedPeptideIdTimes;
peptideIDTimesContextMenuItem.DropDownItems.Add(idTimesAlignedContextMenuItem);
}
if (settings.HasUnalignedTimes())
{
idTimesOtherContextMenuItem.Checked = set.ShowUnalignedPeptideIdTimes;
peptideIDTimesContextMenuItem.DropDownItems.Add(idTimesOtherContextMenuItem);
}
idTimesNoneContextMenuItem.Checked = !peptideIDTimesContextMenuItem.DropDownItems
.Cast<ToolStripMenuItem>()
.Any(idItem => idItem.Checked);
}
menuStrip.Items.Insert(iInsert++, toolStripSeparator16);
AddTransitionContextMenu(menuStrip, iInsert++);
menuStrip.Items.Insert(iInsert++, transformChromContextMenuItem);
// Sometimes child menuitems are stripped from the parent
if (transformChromContextMenuItem.DropDownItems.Count == 0)
{
transformChromContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
transformChromNoneContextMenuItem,
transformChromInterpolatedContextMenuItem,
secondDerivativeContextMenuItem,
firstDerivativeContextMenuItem,
smoothSGChromContextMenuItem
});
}
menuStrip.Items.Insert(iInsert++, toolStripSeparator17);
menuStrip.Items.Insert(iInsert++, autoZoomContextMenuItem);
// Sometimes child menuitems are stripped from the parent
if (autoZoomContextMenuItem.DropDownItems.Count == 0)
{
autoZoomContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
autoZoomNoneContextMenuItem,
autoZoomBestPeakContextMenuItem,
autoZoomRTWindowContextMenuItem,
autoZoomBothContextMenuItem
});
}
lockYChromContextMenuItem.Checked = set.LockYChrom;
menuStrip.Items.Insert(iInsert++, lockYChromContextMenuItem);
synchronizeZoomingContextMenuItem.Checked = set.AutoZoomAllChromatograms;
menuStrip.Items.Insert(iInsert++, synchronizeZoomingContextMenuItem);
iInsert = InsertAlignmentMenuItems(menuStrip.Items, chromFileInfoId, iInsert);
menuStrip.Items.Insert(iInsert++, toolStripSeparator18);
menuStrip.Items.Insert(iInsert++, chromPropsContextMenuItem);
menuStrip.Items.Insert(iInsert, toolStripSeparator19);
// Remove some ZedGraph menu items not of interest
foreach (var item in items)
{
string tag = (string)item.Tag;
if (tag == @"set_default" || tag == @"show_val")
menuStrip.Items.Remove(item);
}
CopyEmfToolStripMenuItem.AddToContextMenu(zedGraphControl, menuStrip);
}
private void AddTransitionContextMenu(ToolStrip menuStrip, int iInsert)
{
menuStrip.Items.Insert(iInsert, transitionsContextMenuItem);
// Sometimes child menuitems are stripped from the parent
if (transitionsContextMenuItem.DropDownItems.Count == 0)
{
transitionsContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
allTranContextMenuItem,
precursorsTranContextMenuItem,
productsTranContextMenuItem,
singleTranContextMenuItem,
totalTranContextMenuItem,
toolStripSeparatorTran,
basePeakContextMenuItem,
ticContextMenuItem,
qcContextMenuItem,
toolStripSeparatorOnlyQuantitative,
onlyQuantitativeContextMenuItem,
toolStripSeparatorSplitGraph,
splitGraphContextMenuItem,
});
}
}
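// True if any transition in the group has a non-empty peak for the given
// replicate index and chromatogram file.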
// ReSharper disable SuggestBaseTypeForParameter
private static bool HasPeak(int iResult, ChromFileInfoId chromFileInfoId, TransitionGroupDocNode nodeGroup)
// ReSharper restore SuggestBaseTypeForParameter
{
foreach (TransitionDocNode nodeTran in nodeGroup.Children)
{
if (HasPeak(iResult, chromFileInfoId, nodeTran))
return true;
}
return false;
}
private static bool HasPeak(int iResults, ChromFileInfoId chromFileInfoId, TransitionDocNode nodeTran)
{
var chromInfo = GetTransitionChromInfo(nodeTran, iResults, chromFileInfoId);
return (chromInfo != null && !chromInfo.IsEmpty);
}
private void legendChromContextMenuItem_Click(object sender, EventArgs e)
{
ShowChromatogramLegends(legendChromContextMenuItem.Checked);
}
public void ShowChromatogramLegends(bool show)
{
Settings.Default.ShowChromatogramLegend = show;
UpdateChromGraphs();
}
private void massErrorContextMenuItem_Click(object sender, EventArgs e)
{
ShowMassErrors(massErrorContextMenuItem.Checked);
}
public void ShowMassErrors(bool show)
{
Settings.Default.ShowMassError = show;
UpdateChromGraphs();
}
private void peakBoundariesContextMenuItem_Click(object sender, EventArgs e)
{
ShowPeakBoundaries(peakBoundariesContextMenuItem.Checked);
}
private void originalPeakContextMenuItem_Click(object sender, EventArgs e)
{
ShowOriginalPeak(originalPeakMenuItem.Checked);
}
public void ShowPeakBoundaries(bool show)
{
Settings.Default.ShowPeakBoundaries = show;
UpdateChromGraphs();
}
public void ShowOriginalPeak(bool show)
{
Settings.Default.ShowOriginalPeak = show;
UpdateChromGraphs();
}
private void retentionTimesContextMenuItem_DropDownOpening(object sender, EventArgs e)
{
var showRT = GraphChromatogram.ShowRT;
allRTContextMenuItem.Checked = (showRT == ShowRTChrom.all);
bestRTContextMenuItem.Checked = (showRT == ShowRTChrom.best);
thresholdRTContextMenuItem.Checked = (showRT == ShowRTChrom.threshold);
noneRTContextMenuItem.Checked = (showRT == ShowRTChrom.none);
}
private void allRTContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowRetentionTimesEnum = ShowRTChrom.all.ToString();
UpdateChromGraphs();
}
private void bestRTContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowRetentionTimesEnum = ShowRTChrom.best.ToString();
UpdateChromGraphs();
}
private void thresholdRTContextMenuItem_Click(object sender, EventArgs e)
{
ShowChromatogramRTThresholdDlg();
}
public void ShowChromatogramRTThresholdDlg()
{
using (var dlg = new ChromatogramRTThresholdDlg())
{
double threshold = Settings.Default.ShowRetentionTimesThreshold;
if (threshold > 0)
dlg.Threshold = threshold;
if (dlg.ShowDialog(this) == DialogResult.OK)
{
Settings.Default.ShowRetentionTimesThreshold = dlg.Threshold;
Settings.Default.ShowRetentionTimesEnum = ShowRTChrom.threshold.ToString();
UpdateChromGraphs();
}
}
}
private void rawTimesContextMenuItem_Click(object sender, EventArgs e)
{
ToggleRawTimesMenuItem();
}
public void ToggleRawTimesMenuItem()
{
Settings.Default.ChromShowRawTimes = !Settings.Default.ChromShowRawTimes;
UpdateChromGraphs();
}
private void noneRTContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowRetentionTimesEnum = ShowRTChrom.none.ToString();
UpdateChromGraphs();
}
private void retentionTimePredContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowRetentionTimePred = retentionTimePredContextMenuItem.Checked;
UpdateChromGraphs();
}
private void peptideIDTimesContextMenuItem_Click(object sender, EventArgs e)
{
ShowPeptideIDTimes(idTimesMatchingContextMenuItem.Checked);
}
public void ShowPeptideIDTimes(bool show)
{
Settings.Default.ShowPeptideIdTimes = show;
UpdateChromGraphs();
}
private void alignedPeptideIDTimesToolStripMenuItem_Click(object sender, EventArgs e)
{
ShowAlignedPeptideIDTimes(idTimesAlignedContextMenuItem.Checked);
}
public void ShowAlignedPeptideIDTimes(bool show)
{
Settings.Default.ShowAlignedPeptideIdTimes = show;
UpdateChromGraphs();
}
private void peptideIDTimesFromOtherRunsToolStripMenuItem_Click(object sender, EventArgs e)
{
ShowOtherRunPeptideIDTimes(idTimesOtherContextMenuItem.Checked);
}
public void ShowOtherRunPeptideIDTimes(bool show)
{
Settings.Default.ShowUnalignedPeptideIdTimes = show;
UpdateChromGraphs();
}
private void idTimesNoneContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowPeptideIdTimes =
Settings.Default.ShowAlignedPeptideIdTimes =
Settings.Default.ShowUnalignedPeptideIdTimes = false;
UpdateChromGraphs();
}
private void nextReplicateMenuItem_Click(object sender, EventArgs e)
{
SelectedResultsIndex++;
}
private void previousReplicateMenuItem_Click(object sender, EventArgs e)
{
SelectedResultsIndex--;
}
private void transitionsMenuItem_DropDownOpening(object sender, EventArgs e)
{
var displayType = GraphChromatogram.DisplayType;
// If it is not possible to have both MS1 and MS/MS ions, then the menu items that
// differentiate precursors and products are unnecessary.
bool showIonTypeOptions = IsMultipleIonSources;
precursorsTranMenuItem.Visible =
precursorsTranContextMenuItem.Visible =
productsTranMenuItem.Visible =
productsTranContextMenuItem.Visible = showIonTypeOptions;
if (!showIonTypeOptions &&
(displayType == DisplayTypeChrom.precursors || displayType == DisplayTypeChrom.products))
displayType = DisplayTypeChrom.all;
// Only show all ions chromatogram options when at least one chromatogram of this type exists
bool showAllIonsOptions = DocumentUI.Settings.HasResults &&
DocumentUI.Settings.MeasuredResults.HasAllIonsChromatograms;
basePeakMenuItem.Visible =
basePeakContextMenuItem.Visible =
ticMenuItem.Visible =
ticContextMenuItem.Visible =
qcMenuItem.Visible =
qcContextMenuItem.Visible =
toolStripSeparatorTranMain.Visible =
toolStripSeparatorTran.Visible = showAllIonsOptions;
if (!showAllIonsOptions &&
(displayType == DisplayTypeChrom.base_peak || displayType == DisplayTypeChrom.tic || displayType == DisplayTypeChrom.qc))
displayType = DisplayTypeChrom.all;
if (showAllIonsOptions)
{
qcMenuItem.DropDownItems.Clear();
qcContextMenuItem.DropDownItems.Clear();
var qcTraceNames = DocumentUI.MeasuredResults.QcTraceNames.ToList();
if (qcTraceNames.Count > 0)
{
var qcTraceItems = new ToolStripItem[qcTraceNames.Count];
var qcContextTraceItems = new ToolStripItem[qcTraceNames.Count];
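// Separate items must be created for the main menu and the context menu,
// because a ToolStripItem can belong to only one parent collection at a time.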
for (int i = 0; i < qcTraceNames.Count; i++)
{
qcTraceItems[i] = new ToolStripMenuItem(qcTraceNames[i], null, qcMenuItem_Click)
{
Checked = displayType == DisplayTypeChrom.qc &&
Settings.Default.ShowQcTraceName == qcTraceNames[i]
};
qcContextTraceItems[i] = new ToolStripMenuItem(qcTraceNames[i], null, qcMenuItem_Click)
{
Checked = displayType == DisplayTypeChrom.qc &&
Settings.Default.ShowQcTraceName == qcTraceNames[i]
};
}
qcMenuItem.DropDownItems.AddRange(qcTraceItems);
qcContextMenuItem.DropDownItems.AddRange(qcContextTraceItems);
}
else
qcMenuItem.Visible = qcContextMenuItem.Visible = false;
}
precursorsTranMenuItem.Checked = precursorsTranContextMenuItem.Checked =
(displayType == DisplayTypeChrom.precursors);
productsTranMenuItem.Checked = productsTranContextMenuItem.Checked =
(displayType == DisplayTypeChrom.products);
singleTranMenuItem.Checked = singleTranContextMenuItem.Checked =
(displayType == DisplayTypeChrom.single);
allTranMenuItem.Checked = allTranContextMenuItem.Checked =
(displayType == DisplayTypeChrom.all);
totalTranMenuItem.Checked = totalTranContextMenuItem.Checked =
(displayType == DisplayTypeChrom.total);
basePeakMenuItem.Checked = basePeakContextMenuItem.Checked =
(displayType == DisplayTypeChrom.base_peak);
ticMenuItem.Checked = ticContextMenuItem.Checked =
(displayType == DisplayTypeChrom.tic);
splitGraphMenuItem.Checked = splitGraphContextMenuItem.Checked
= Settings.Default.SplitChromatogramGraph;
onlyQuantitativeContextMenuItem.Checked = Settings.Default.ShowQuantitativeOnly;
}
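/// <summary>
/// True if the selected peptide can have chromatograms from more than one ion
/// source (MS1 precursors and MS/MS products), which makes the menu items that
/// differentiate precursors and products meaningful.
/// </summary>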
private bool IsMultipleIonSources
{
get
{
var nodeTreePep = SequenceTree.GetNodeOfType<PeptideTreeNode>();
if (nodeTreePep == null)
return false;
var fullScan = DocumentUI.Settings.TransitionSettings.FullScan;
return nodeTreePep.DocNode.TransitionGroups.Contains(
nodeGroup => GraphChromatogram.IsMultipleIonSources(fullScan, nodeGroup));
}
}
private void removePeakMenuItem_DropDownOpening(object sender, EventArgs e)
{
CanApplyOrRemovePeak(null, null, out _, out var canRemove);
if (!canRemove)
return;
if (!(sender is ToolStripMenuItem menu) || !menu.DropDownItems.OfType<object>().Any())
return;
var nodeGroupTree = SequenceTree.GetNodeOfType<TransitionGroupTreeNode>();
if (nodeGroupTree != null)
{
var nodeGroup = nodeGroupTree.DocNode;
var pathGroup = nodeGroupTree.Path;
var nodeTranTree = (TransitionTreeNode)SelectedNode;
var nodeTran = nodeTranTree.DocNode;
menu.DropDownItems.Clear();
if (nodeGroup.TransitionCount > 1)
{
var handler = new RemovePeakHandler(this, pathGroup, nodeGroup, null);
var item = new ToolStripMenuItem(Resources.SkylineWindow_removePeaksGraphMenuItem_DropDownOpening_All, null, handler.menuItem_Click);
menu.DropDownItems.Insert(0, item);
}
var chromInfo = GetTransitionChromInfo(nodeTran, SequenceTree.ResultsIndex, GetSelectedChromFileId());
if (chromInfo != null && !chromInfo.IsEmpty)
{
var handler = new RemovePeakHandler(this, pathGroup, nodeGroup, nodeTran);
var item = new ToolStripMenuItem(ChromGraphItem.GetTitle(nodeTran), null, handler.menuItem_Click);
menu.DropDownItems.Insert(0, item);
}
return;
}
var nodePepTree = SequenceTree.GetNodeOfType<PeptideTreeNode>();
if (nodePepTree != null)
{
var placeholder = menu.DropDownItems.OfType<object>().FirstOrDefault() as ToolStripMenuItem;
if (placeholder == null)
return;
var isotopeLabelType = placeholder.Tag as IsotopeLabelType;
if (isotopeLabelType == null)
return;
menu.DropDownItems.Clear();
var transitionGroupDocNode = nodePepTree.DocNode.TransitionGroups.FirstOrDefault(transitionGroup => Equals(transitionGroup.TransitionGroup.LabelType, isotopeLabelType));
if (transitionGroupDocNode == null)
return;
var item = new ToolStripMenuItem(Resources.SkylineWindow_removePeaksGraphMenuItem_DropDownOpening_All, null, removePeakMenuItem_Click);
menu.DropDownItems.Insert(0, item);
var handler = new RemovePeakHandler(this, new IdentityPath(nodePepTree.Path, transitionGroupDocNode.Id), transitionGroupDocNode, null);
item = new ToolStripMenuItem(isotopeLabelType.Title, null, handler.menuItem_Click);
menu.DropDownItems.Insert(0, item);
}
}
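/// <summary>
/// Captures the group path and nodes for a remove-peak menu item, so that the
/// click handler removes the correct peak after the menu has closed.
/// </summary>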
private class RemovePeakHandler
{
private readonly SkylineWindow _skyline;
private readonly IdentityPath _groupPath;
private readonly TransitionGroupDocNode _nodeGroup;
private readonly TransitionDocNode _nodeTran;
public RemovePeakHandler(SkylineWindow skyline, IdentityPath groupPath,
TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran)
{
_skyline = skyline;
_groupPath = groupPath;
_nodeGroup = nodeGroup;
_nodeTran = nodeTran;
}
public void menuItem_Click(object sender, EventArgs e)
{
_skyline.RemovePeak(_groupPath, _nodeGroup, _nodeTran);
}
}
private void applyPeakAllMenuItem_Click(object sender, EventArgs e)
{
ApplyPeak(false, false);
}
private void applyPeakSubsequentMenuItem_Click(object sender, EventArgs e)
{
ApplyPeak(true, false);
}
private void applyPeakGroupGraphMenuItem_Click(object sender, EventArgs e)
{
ApplyPeak(false, true);
}
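/// <summary>
/// Applies the peak picked in the selected replicate to other replicates,
/// running the peak matching under a LongWaitDlg and recording an audit log entry.
/// </summary>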
public void ApplyPeak(bool subsequent, bool group)
{
CanApplyOrRemovePeak(null, null, out var canApply, out _);
if (!canApply)
return;
var nodePepTree = SequenceTree.GetNodeOfType<PeptideTreeNode>();
var nodeTranGroupTree = SequenceTree.GetNodeOfType<TransitionGroupTreeNode>();
var nodeTranGroup = nodeTranGroupTree?.DocNode;
using (var longWait = new LongWaitDlg(this) { Text = Resources.SkylineWindow_ApplyPeak_Applying_Peak })
{
SrmDocument doc = null;
try
{
var resultsIndex = SelectedResultsIndex;
var chromatogramSet = Document.MeasuredResults.Chromatograms[resultsIndex];
var resultsFile = GetGraphChrom(chromatogramSet.Name).GetChromFileInfoId();
var groupBy =
ReplicateValue.FromPersistedString(Document.Settings, Settings.Default.GroupApplyToBy);
object groupByValue = null;
if (groupBy != null)
{
groupByValue = groupBy.GetValue(new AnnotationCalculator(Document), chromatogramSet);
}
longWait.PerformWork(this, 800, monitor =>
doc = PeakMatcher.ApplyPeak(Document, nodePepTree, ref nodeTranGroup, resultsIndex, resultsFile, subsequent, groupBy, groupByValue, monitor));
}
catch (Exception x)
{
MessageDlg.ShowWithException(this, TextUtil.LineSeparate(Resources.SkylineWindow_ApplyPeak_Failed_to_apply_peak_, x.Message), x);
}
if (!longWait.IsCanceled && doc != null && !ReferenceEquals(doc, Document))
{
// ReSharper disable once PossibleNullReferenceException
var path = PropertyName.ROOT
.SubProperty(((PeptideGroupTreeNode) nodePepTree.SrmParent).DocNode.AuditLogText)
.SubProperty(nodePepTree.DocNode.AuditLogText)
.SubProperty(nodeTranGroup.AuditLogText);
var msg = subsequent ? MessageType.applied_peak_subsequent : MessageType.applied_peak_all;
ModifyDocument(Resources.SkylineWindow_PickPeakInChromatograms_Apply_picked_peak, document => doc,
docPair => AuditLogEntry.CreateSimpleEntry(msg, docPair.NewDocumentType, path.ToString()));
}
}
}
private void removePeakMenuItem_Click(object sender, EventArgs e)
{
var menu = sender as ToolStripMenuItem;
if (menu == null || menu.DropDownItems.OfType<object>().Any())
return;
bool removePeakByContextMenu = menu == removePeakContextMenuItem;
RemovePeak(removePeakByContextMenu);
}
public void RemovePeak(bool removePeakByContextMenu = false)
{
var chromFileInfoId = GetSelectedChromFileId();
CanApplyOrRemovePeak(null, null, out _, out var canRemove);
if (!canRemove)
return;
var nodeGroupTree = SequenceTree.GetNodeOfType<TransitionGroupTreeNode>();
var nodeGroups = new List<Tuple<TransitionGroupDocNode, IdentityPath>>();
var nodePepTree = SelectedNode as PeptideTreeNode;
if (nodeGroupTree != null)
{
nodeGroups.Add(new Tuple<TransitionGroupDocNode, IdentityPath>(nodeGroupTree.DocNode, nodeGroupTree.Path));
}
else if (nodePepTree != null && nodePepTree.Nodes.OfType<object>().Any())
{
nodeGroups.AddRange(from TransitionGroupDocNode tranGroup in nodePepTree.DocNode.Children
select
new Tuple<TransitionGroupDocNode, IdentityPath>(tranGroup, new IdentityPath(nodePepTree.Path, tranGroup.Id)));
}
else
{
return;
}
TransitionDocNode nodeTran = null;
if (removePeakByContextMenu)
{
var nodeTranTree = SelectedNode as TransitionTreeNode;
if (nodeTranTree != null)
{
nodeTran = nodeTranTree.DocNode;
}
}
if (nodeGroups.Count == 1)
{
var nodeGroup = nodeGroups.First();
RemovePeak(nodeGroup.Item2, nodeGroup.Item1, nodeTran);
}
else
{
// ReSharper disable once PossibleNullReferenceException
ModifyDocument(string.Format(Resources.SkylineWindow_removePeakContextMenuItem_Click_Remove_all_peaks_from__0_, nodePepTree.DocNode.ModifiedSequenceDisplay),
document => nodeGroups.Aggregate(document,
(doc, nodeGroup) => RemovePeakInternal(doc, SelectedResultsIndex, chromFileInfoId, nodeGroup.Item2, nodeGroup.Item1, nodeTran)),
docPair =>
{
var peptideGroup = ((PeptideGroupTreeNode) nodePepTree.SrmParent).DocNode;
var name = PropertyName.ROOT.SubProperty(peptideGroup.AuditLogText)
.SubProperty(nodePepTree.DocNode.AuditLogText);
return AuditLogEntry.CreateSimpleEntry(MessageType.removed_all_peaks_from, docPair.OldDocumentType, name,
docPair.OldDoc.MeasuredResults.Chromatograms[SelectedResultsIndex].Name);
});
}
}
public void RemovePeak(IdentityPath groupPath, TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran)
{
string message = nodeTran == null
? string.Format(Resources.SkylineWindow_RemovePeak_Remove_all_peaks_from__0__, ChromGraphItem.GetTitle(nodeGroup))
: string.Format(Resources.SkylineWindow_RemovePeak_Remove_peak_from__0__, ChromGraphItem.GetTitle(nodeTran));
var chromFileInfoId = GetSelectedChromFileId();
ModifyDocument(message, doc => RemovePeakInternal(doc, SelectedResultsIndex, chromFileInfoId, groupPath, nodeGroup, nodeTran),
docPair =>
{
var msg = nodeTran == null ? MessageType.removed_all_peaks_from : MessageType.removed_peak_from;
var peptide = (PeptideDocNode) docPair.OldDoc.FindNode(groupPath.Parent);
var peptideGroup = (PeptideGroupDocNode) docPair.OldDoc.FindNode(groupPath.Parent.Parent);
var name = PropertyName.ROOT.SubProperty(peptideGroup.AuditLogText)
.SubProperty(peptide.AuditLogText).SubProperty(nodeGroup.AuditLogText);
if (nodeTran != null)
name = name.SubProperty(nodeTran.AuditLogText);
return AuditLogEntry.CreateSimpleEntry(msg, docPair.OldDocumentType, name,
docPair.OldDoc.MeasuredResults.Chromatograms[SelectedResultsIndex].Name);
});
}
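/// <summary>
/// Removes an integrated peak by setting zero peak boundaries for the transition,
/// or for the entire transition group when nodeTran is null, in the specified
/// replicate file.
/// </summary>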
private SrmDocument RemovePeakInternal(SrmDocument document, int resultsIndex, ChromFileInfoId chromFileInfoId, IdentityPath groupPath,
TransitionGroupDocNode nodeGroup, TransitionDocNode nodeTran)
{
ChromInfo chromInfo;
Transition transition;
if (nodeTran == null)
{
chromInfo = GetTransitionGroupChromInfo(nodeGroup, resultsIndex, chromFileInfoId);
transition = null;
}
else
{
chromInfo = GetTransitionChromInfo(nodeTran, resultsIndex, chromFileInfoId);
transition = nodeTran.Transition;
}
if (chromInfo == null)
return document;
MsDataFileUri filePath;
string name = GetGraphChromStrings(resultsIndex, chromInfo.FileId, out filePath);
return name == null
? document
: document.ChangePeak(groupPath, name, filePath, transition, 0, 0, UserSet.TRUE, PeakIdentification.FALSE, false);
}
private static TransitionGroupChromInfo GetTransitionGroupChromInfo(TransitionGroupDocNode nodeGroup, int iResults, ChromFileInfoId chromFileInfoId)
{
return nodeGroup.GetChromInfo(iResults, chromFileInfoId);
}
private static TransitionChromInfo GetTransitionChromInfo(TransitionDocNode nodeTran, int iResults, ChromFileInfoId chromFileInfoId)
{
return nodeTran.GetChromInfo(iResults, chromFileInfoId);
}
private void singleTranMenuItem_Click(object sender, EventArgs e)
{
ShowSingleTransition();
}
public void ShowSingleTransition()
{
SetDisplayTypeChrom(DisplayTypeChrom.single);
}
private void precursorsTranMenuItem_Click(object sender, EventArgs e)
{
ShowPrecursorTransitions();
}
public void ShowPrecursorTransitions()
{
SetDisplayTypeChrom(DisplayTypeChrom.precursors);
}
private void productsTranMenuItem_Click(object sender, EventArgs e)
{
ShowProductTransitions();
}
public void ShowProductTransitions()
{
SetDisplayTypeChrom(DisplayTypeChrom.products);
}
private void allTranMenuItem_Click(object sender, EventArgs e)
{
ShowAllTransitions();
}
public void ShowAllTransitions()
{
SetDisplayTypeChrom(DisplayTypeChrom.all);
}
private void totalTranMenuItem_Click(object sender, EventArgs e)
{
ShowTotalTransitions();
}
public void ShowTotalTransitions()
{
SetDisplayTypeChrom(DisplayTypeChrom.total);
}
private void basePeakMenuItem_Click(object sender, EventArgs e)
{
ShowBasePeak();
}
public void ShowBasePeak()
{
SetDisplayTypeChrom(DisplayTypeChrom.base_peak);
}
private void ticMenuItem_Click(object sender, EventArgs e)
{
ShowTic();
}
public void ShowTic()
{
SetDisplayTypeChrom(DisplayTypeChrom.tic);
}
private void qcMenuItem_Click(object sender, EventArgs e)
{
var qcTraceItem = sender as ToolStripMenuItem;
if (qcTraceItem == null)
throw new InvalidOperationException(@"qcMenuItem_Click must be triggered by a ToolStripMenuItem");
ShowQc(qcTraceItem.Text);
}
public void ShowQc(string qcTraceName)
{
Settings.Default.ShowQcTraceName = qcTraceName;
SetDisplayTypeChrom(DisplayTypeChrom.qc);
}
public void SetDisplayTypeChrom(DisplayTypeChrom displayType)
{
Settings.Default.ShowTransitionGraphs = displayType.ToString();
UpdateChromGraphs();
UpdateSpectrumGraph(false);
UpdateRetentionTimeGraph();
UpdatePeakAreaGraph();
UpdateMassErrorGraph();
}
private void transformChromMenuItem_DropDownOpening(object sender, EventArgs e)
{
var transform = GraphChromatogram.Transform;
transformChromNoneMenuItem.Checked = transformChromNoneContextMenuItem.Checked =
(transform == TransformChrom.raw);
transformChromInterploatedMenuItem.Checked = transformChromInterpolatedContextMenuItem.Checked =
(transform == TransformChrom.interpolated);
secondDerivativeMenuItem.Checked = secondDerivativeContextMenuItem.Checked =
(transform == TransformChrom.craw2d);
firstDerivativeMenuItem.Checked = firstDerivativeContextMenuItem.Checked =
(transform == TransformChrom.craw1d);
smoothSGChromMenuItem.Checked = smoothSGChromContextMenuItem.Checked =
(transform == TransformChrom.savitzky_golay);
}
private void transformChromNoneMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.TransformTypeChromatogram = TransformChrom.raw.ToString();
UpdateChromGraphs();
}
private void transformInterpolatedMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.TransformTypeChromatogram = TransformChrom.interpolated.ToString();
UpdateChromGraphs();
}
private void secondDerivativeMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.TransformTypeChromatogram = TransformChrom.craw2d.ToString();
UpdateChromGraphs();
}
private void firstDerivativeMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.TransformTypeChromatogram = TransformChrom.craw1d.ToString();
UpdateChromGraphs();
}
private void smoothSGChromMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.TransformTypeChromatogram = TransformChrom.savitzky_golay.ToString();
UpdateChromGraphs();
}
private void lockYChromContextMenuItem_Click(object sender, EventArgs e)
{
LockYChrom(lockYChromContextMenuItem.Checked);
}
public void LockYChrom(bool locked)
{
bool lockY = Settings.Default.LockYChrom = locked;
// Avoid updating the rest of the chart just to change the y-axis lock state
foreach (var chromatogram in _listGraphChrom)
chromatogram.LockYAxis(lockY);
}
private void synchronizeZoomingContextMenuItem_Click(object sender, EventArgs e)
{
SynchronizeZooming(synchronizeZoomingContextMenuItem.Checked);
}
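// When zoom synchronization is turned on, immediately propagate the active
// chromatogram graph's zoom state to all of the other chromatogram graphs.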
public void SynchronizeZooming(bool isChecked)
{
bool zoomAll = Settings.Default.AutoZoomAllChromatograms = isChecked;
if (zoomAll)
{
var activeForm = dockPanel.ActiveContent;
int iActive = _listGraphChrom.IndexOf(chrom => ReferenceEquals(chrom, activeForm));
ZoomState zoomState = (iActive != -1 ? _listGraphChrom[iActive].ZoomState : null);
if (zoomState != null)
graphChromatogram_ZoomAll(null, new ZoomEventArgs(zoomState));
}
}
private void autozoomMenuItem_DropDownOpening(object sender, EventArgs e)
{
bool hasRt = (DocumentUI.Settings.PeptideSettings.Prediction.RetentionTime != null);
autoZoomRTWindowMenuItem.Enabled = autoZoomRTWindowContextMenuItem.Enabled = hasRt;
autoZoomBothMenuItem.Enabled = autoZoomBothContextMenuItem.Enabled = hasRt;
var zoom = GraphChromatogram.AutoZoom;
if (!hasRt)
{
if (zoom == AutoZoomChrom.window)
zoom = AutoZoomChrom.none;
else if (zoom == AutoZoomChrom.both)
zoom = AutoZoomChrom.peak;
}
autoZoomNoneMenuItem.Checked = autoZoomNoneContextMenuItem.Checked =
(zoom == AutoZoomChrom.none);
autoZoomBestPeakMenuItem.Checked = autoZoomBestPeakContextMenuItem.Checked =
(zoom == AutoZoomChrom.peak);
autoZoomRTWindowMenuItem.Checked = autoZoomRTWindowContextMenuItem.Checked =
(zoom == AutoZoomChrom.window);
autoZoomBothMenuItem.Checked = autoZoomBothContextMenuItem.Checked =
(zoom == AutoZoomChrom.both);
}
private void autoZoomNoneMenuItem_Click(object sender, EventArgs e)
{
AutoZoomNone();
}
public void AutoZoomNone()
{
Settings.Default.AutoZoomChromatogram = AutoZoomChrom.none.ToString();
UpdateChromGraphs();
}
private void autoZoomBestPeakMenuItem_Click(object sender, EventArgs e)
{
AutoZoomBestPeak();
}
public void AutoZoomBestPeak()
{
Settings.Default.AutoZoomChromatogram = AutoZoomChrom.peak.ToString();
UpdateChromGraphs();
}
private void autoZoomRTWindowMenuItem_Click(object sender, EventArgs e)
{
AutoZoomRTWindow();
}
public void AutoZoomRTWindow()
{
Settings.Default.AutoZoomChromatogram = AutoZoomChrom.window.ToString();
UpdateChromGraphs();
}
private void autoZoomBothMenuItem_Click(object sender, EventArgs e)
{
AutoZoomBoth();
}
public void AutoZoomBoth()
{
Settings.Default.AutoZoomChromatogram = AutoZoomChrom.both.ToString();
UpdateChromGraphs();
}
private void chromPropsContextMenuItem_Click(object sender, EventArgs e)
{
ShowChromatogramProperties();
}
public void ShowChromatogramProperties()
{
using (var dlg = new ChromChartPropertyDlg())
{
if (dlg.ShowDialog(this) == DialogResult.OK)
UpdateChromGraphs();
}
}
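/// <summary>
/// Shows or hides the chromatogram graph for the named replicate. When the
/// maximum number of graphs is reached, the least recently used graph is
/// re-purposed, and _listGraphChrom is kept in most-recently-used order.
/// </summary>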
private void ShowGraphChrom(string name, bool show)
{
var graphChrom = GetGraphChrom(name);
if (graphChrom != null)
{
if (show)
{
graphChrom.Activate();
graphChrom.Focus();
}
else if (graphChrom.DockState != DockState.Hidden)
graphChrom.Hide();
}
else if (show)
{
if (_listGraphChrom.Count >= MAX_GRAPH_CHROM)
{
// List is too long, re-purpose least recently used
graphChrom = _listGraphChrom[0];
graphChrom.ChangeChromatogram(name);
graphChrom.Activate();
graphChrom.Visible = true;
graphChrom.Focus();
}
else
{
graphChrom = CreateGraphChrom(name, SelectedGraphChromName, false);
}
}
if (show)
{
// Move this to end of MRU so it's seen as most recent
_listGraphChrom.Remove(graphChrom);
_listGraphChrom.Add(graphChrom);
}
}
public IEnumerable<GraphChromatogram> GraphChromatograms { get { return _listGraphChrom; } }
public GraphChromatogram GetGraphChrom(string name)
{
int iGraph = _listGraphChrom.IndexOf(graph => Equals(graph.NameSet, name));
return (iGraph != -1 ? _listGraphChrom[iGraph] : null);
}
// private bool IsGraphChromVisible(string name)
// {
// int iGraph = _listGraphChrom.IndexOf(graph => Equals(graph.NameSet, name));
// return iGraph != -1 && !_listGraphChrom[iGraph].IsHidden;
// }
public string SelectedGraphChromName
{
get
{
MsDataFileUri temp;
return GetGraphChromStrings(SelectedResultsIndex, null, out temp);
}
}
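/// <summary>
/// Returns the chromatogram set name for a replicate index and, when a file ID
/// is given, also its file path; returns null if the index has no results.
/// </summary>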
private string GetGraphChromStrings(int iResult, ChromFileInfoId fileId, out MsDataFileUri filePath)
{
filePath = null;
if (iResult != -1)
{
var settings = DocumentUI.Settings;
if (settings.HasResults && iResult < settings.MeasuredResults.Chromatograms.Count)
{
var chromatogramSet = settings.MeasuredResults.Chromatograms[iResult];
if (fileId != null)
filePath = chromatogramSet.GetFileInfo(fileId).FilePath;
return chromatogramSet.Name;
}
}
return null;
}
private GraphChromatogram CreateGraphChrom(string name)
{
var graphChrom = new GraphChromatogram(this, this, name);
graphChrom.FormClosed += graphChromatogram_FormClosed;
graphChrom.PickedPeak += graphChromatogram_PickedPeak;
graphChrom.ClickedChromatogram += graphChromatogram_ClickedChromatogram;
graphChrom.ChangedPeakBounds += graphChromatogram_ChangedPeakBounds;
graphChrom.PickedSpectrum += graphChromatogram_PickedSpectrum;
graphChrom.ZoomAll += graphChromatogram_ZoomAll;
_listGraphChrom.Add(graphChrom);
return graphChrom;
}
private void DestroyGraphChrom(GraphChromatogram graphChrom)
{
// Detach event handlers and dispose
graphChrom.FormClosed -= graphChromatogram_FormClosed;
graphChrom.PickedPeak -= graphChromatogram_PickedPeak;
graphChrom.ClickedChromatogram -= graphChromatogram_ClickedChromatogram;
graphChrom.ChangedPeakBounds -= graphChromatogram_ChangedPeakBounds;
graphChrom.PickedSpectrum -= graphChromatogram_PickedSpectrum;
graphChrom.ZoomAll -= graphChromatogram_ZoomAll;
graphChrom.HideOnClose = false;
graphChrom.Close();
}
private GraphChromatogram CreateGraphChrom(string name, string namePosition, bool split)
{
// Create a new chromatogram graph
var graphChrom = CreateGraphChrom(name);
int firstDocumentPane = FirstDocumentPane;
if (firstDocumentPane == -1)
graphChrom.Show(dockPanel, DockState.Document);
else
{
var graphPosition = GetGraphChrom(namePosition);
IDockableForm formBefore;
DockPane paneExisting = FindChromatogramPane(graphPosition, out formBefore);
if (paneExisting == null)
graphChrom.Show(dockPanel.Panes[firstDocumentPane], DockPaneAlignment.Left, 0.5);
else if (!split)
{
graphChrom.Show(paneExisting, null); // Add to the end
}
else
{
var alignment = (graphChrom.Width > graphChrom.Height ?
DockPaneAlignment.Right : DockPaneAlignment.Bottom);
graphChrom.Show(paneExisting, alignment, 0.5);
}
}
return graphChrom;
}
private int FirstDocumentPane
{
get
{
return dockPanel.Panes.IndexOf(pane => !pane.IsHidden && pane.DockState == DockState.Document);
}
}
private DockPane FindChromatogramPane(GraphChromatogram graphChrom, out IDockableForm formBefore)
{
foreach (var pane in dockPanel.Panes)
{
foreach (IDockableForm form in pane.Contents)
{
if (form is GraphChromatogram &&
(graphChrom == null || graphChrom == form))
{
formBefore = form;
return pane;
}
}
}
formBefore = null;
return null;
}
private DockPane FindPane(IDockableForm dockableForm)
{
// Floating panes may be created but hidden for windows that allow floating
// Have to check "DisplayingContents.Count > 0" instead of "IsHidden" here, since "IsHidden" does not get updated when
// SuspendLayout is on.
int iPane = dockPanel.Panes.IndexOf(pane => pane.Contents.Contains(dockableForm) && pane.DisplayingContents.Count > 0);
return (iPane != -1 ? dockPanel.Panes[iPane] : null);
}
private void graphChromatogram_FormClosed(object sender, FormClosedEventArgs e)
{
_listGraphChrom.Remove((GraphChromatogram)sender);
}
private void graphChromatogram_PickedPeak(object sender, PickedPeakEventArgs e)
{
var graphChrom = sender as GraphChromatogram;
if (graphChrom != null)
graphChrom.LockZoom();
try
{
ModifyDocument(string.Format(Resources.SkylineWindow_graphChromatogram_PickedPeak_Pick_peak__0_F01_, e.RetentionTime),
doc => PickPeak(doc, e), docPair =>
{
var name = GetPropertyName(docPair.OldDoc, e.GroupPath, e.TransitionId);
return AuditLogEntry.CreateSimpleEntry(MessageType.picked_peak, docPair.OldDocumentType, name, e.NameSet,
e.RetentionTime.MeasuredTime.ToString(@"#.0", CultureInfo.CurrentCulture));
});
}
finally
{
if (graphChrom != null)
graphChrom.UnlockZoom();
}
}
private static PropertyName GetPropertyName(SrmDocument doc, IdentityPath groupPath, Identity transitionId)
{
var node = doc.FindNode(groupPath);
if (transitionId != null)
node = ((TransitionGroupDocNode)node).FindNode(transitionId);
return AuditLogEntry.GetNodeName(doc, node);
}
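// Shows the full-scan graph for the clicked point on a chromatogram. If no scan
// index is available, the raw file must be re-imported to view full scans.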
private void graphChromatogram_ClickedChromatogram(object sender, ClickedChromatogramEventArgs e)
{
if (e.ScanProvider != null)
{
var dataFile = e.ScanProvider.DataFilePath;
if (e.ScanIndex == -1)
{
MessageDlg.Show(this,
string.Format(Resources.SkylineWindow_graphChromatogram_ClickedChromatogram_The_raw_file_must_be_re_imported_in_order_to_show_full_scans___0_, dataFile));
return;
}
}
ShowGraphFullScan(e.ScanProvider, e.TransitionIndex, e.ScanIndex);
}
/// <summary>
/// Modifies a document in response to the user clicking on a peak in the GraphChromatogram.
/// </summary>
private static SrmDocument PickPeak(SrmDocument document, PickedPeakEventArgs e)
{
document = document.ChangePeak(e.GroupPath, e.NameSet, e.FilePath, e.TransitionId, e.RetentionTime.MeasuredTime, UserSet.TRUE);
var activeTransitionGroup = (TransitionGroupDocNode) document.FindNode(e.GroupPath);
if (activeTransitionGroup.RelativeRT != RelativeRT.Matching)
{
return document;
}
var activeChromInfo = FindChromInfo(document, activeTransitionGroup, e.NameSet, e.FilePath);
var peptide = (PeptideDocNode) document.FindNode(e.GroupPath.Parent);
// See if there are any other transition groups that should have their peak bounds set to the same value
foreach (var transitionGroup in peptide.TransitionGroups)
{
if (transitionGroup.RelativeRT != RelativeRT.Matching)
{
continue;
}
var groupPath = new IdentityPath(e.GroupPath.Parent, transitionGroup.TransitionGroup);
if (Equals(groupPath, e.GroupPath))
{
continue;
}
var chromInfo = FindChromInfo(document, transitionGroup, e.NameSet, e.FilePath);
if (null == chromInfo)
{
continue;
}
document = document.ChangePeak(groupPath, e.NameSet, e.FilePath, null,
activeChromInfo.StartRetentionTime, activeChromInfo.EndRetentionTime, UserSet.TRUE, activeChromInfo.Identified, true);
}
return document;
}
/// <summary>
/// Finds the TransitionGroupChromInfo that matches the specified ChromatogramSet name and file path.
/// </summary>
public static TransitionGroupChromInfo FindChromInfo(SrmDocument document,
TransitionGroupDocNode transitionGroupDocNode, string nameChromatogramSet, MsDataFileUri filePath)
{
ChromatogramSet chromatogramSet;
int indexSet;
if (!document.Settings.MeasuredResults.TryGetChromatogramSet(nameChromatogramSet, out chromatogramSet, out indexSet))
{
return null;
}
var chromFileInfoId = chromatogramSet.FindFile(filePath);
if (null == chromFileInfoId)
{
return null;
}
var results = transitionGroupDocNode.Results[indexSet];
if (results.IsEmpty)
{
return null;
}
return results.FirstOrDefault(chromInfo => ReferenceEquals(chromFileInfoId, chromInfo.FileId));
}
private void graphChromatogram_ChangedPeakBounds(object sender, ChangedMultiPeakBoundsEventArgs eMulti)
{
var graphChrom = sender as GraphChromatogram;
if (graphChrom != null)
graphChrom.LockZoom();
try
{
string message;
// Handle most common case of a change to a single group first.
if (eMulti.Changes.Length == 1)
{
ChangedPeakBoundsEventArgs e = eMulti.Changes[0];
if (Equals(e.StartTime, e.EndTime))
message = Resources.SkylineWindow_graphChromatogram_ChangedPeakBounds_Remove_peak;
else if (e.ChangeType == PeakBoundsChangeType.both)
message = string.Format(Resources.SkylineWindow_graphChromatogram_ChangedPeakBounds_Change_peak_to__0_F01___1_F01_, e.StartTime, e.EndTime);
else if (e.ChangeType == PeakBoundsChangeType.start)
message = string.Format(Resources.SkylineWindow_graphChromatogram_ChangedPeakBounds_Change_peak_start_to__0_F01_, e.StartTime);
else
message = string.Format(Resources.SkylineWindow_graphChromatogram_ChangedPeakBounds_Change_peak_end_to__0_F01_, e.EndTime);
}
else
{
message = Resources.SkylineWindow_graphChromatogram_ChangedPeakBounds_Change_peaks;
}
ModifyDocument(message,
doc => ChangePeakBounds(doc, eMulti.Changes), docPair =>
{
var names = eMulti.Changes.Select(change =>
GetPropertyName(docPair.OldDoc, change.GroupPath, change.Transition)).ToArray();
var messages = eMulti.Changes
.SelectMany((change, index) => GetMessagesForPeakBoundsChange(names[index], change))
.ToList();
if (messages.Count == 1)
{
return AuditLogEntry.CreateSingleMessageEntry(messages[0]);
}
else if (messages.Count > 1)
{
var firstName = names.First();
if (names.All(name => Equals(name, firstName)))
{
return AuditLogEntry
.CreateSimpleEntry(MessageType.changed_peak_bounds_of, docPair.OldDocumentType, firstName)
.ChangeAllInfo(messages);
}
else // TODO: is this even possible?
{
return AuditLogEntry
.CreateSimpleEntry(MessageType.changed_peak_bounds, docPair.OldDocumentType)
.ChangeAllInfo(messages);
}
}
return null;
});
}
finally
{
if (graphChrom != null)
graphChrom.UnlockZoom();
}
}
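/// <summary>
/// Builds the audit log messages for a peak boundary change, pairing the original
/// start/end times read from the document with the new times from the event.
/// </summary>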
private List<MessageInfo> GetMessagesForPeakBoundsChange(PropertyName name, ChangedPeakBoundsEventArgs args)
{
var singleTransitionDisplay = args.Transition != null;
var result = new List<MessageInfo>();
var transitionGroupDocNode = (TransitionGroupDocNode) Document.FindNode(args.GroupPath);
var transitionDocNode = singleTransitionDisplay
? transitionGroupDocNode.Transitions.FirstOrDefault(tr => ReferenceEquals(tr.Id, args.Transition))
: null;
ChromatogramSet chromatograms;
int indexSet;
if (!Document.Settings.HasResults ||
!Document.Settings.MeasuredResults.TryGetChromatogramSet(args.NameSet, out chromatograms, out indexSet))
return result;
float? startTime = null;
float? endTime = null;
if (singleTransitionDisplay)
{
if (transitionDocNode != null)
{
var chromInfo = transitionDocNode.Results[indexSet].FirstOrDefault(ci => ci.OptimizationStep == 0);
if (chromInfo != null)
{
startTime = chromInfo.StartRetentionTime;
endTime = chromInfo.EndRetentionTime;
}
}
}
else
{
var chromInfo = transitionGroupDocNode.Results[indexSet].FirstOrDefault(ci => ci.OptimizationStep == 0);
if (chromInfo != null)
{
startTime = chromInfo.StartRetentionTime;
endTime = chromInfo.EndRetentionTime;
}
}
if (args.ChangeType == PeakBoundsChangeType.start || args.ChangeType == PeakBoundsChangeType.both)
{
result.Add(new MessageInfo(
singleTransitionDisplay ? MessageType.changed_peak_start : MessageType.changed_peak_start_all,
Document.DocumentType,
name, args.NameSet, LogMessage.RoundDecimal(startTime, 2),
LogMessage.RoundDecimal(args.StartTime.MeasuredTime, 2)));
}
if (args.ChangeType == PeakBoundsChangeType.end || args.ChangeType == PeakBoundsChangeType.both)
{
result.Add(new MessageInfo(
singleTransitionDisplay ? MessageType.changed_peak_end : MessageType.changed_peak_end_all, Document.DocumentType, name,
args.NameSet, LogMessage.RoundDecimal(endTime, 2),
LogMessage.RoundDecimal(args.EndTime.MeasuredTime, 2)));
}
return result;
}
/// <summary>
/// Modifies a document in response to a user's mouse dragging on a GraphChromatogram.
/// </summary>
private static SrmDocument ChangePeakBounds(SrmDocument document, IEnumerable<ChangedPeakBoundsEventArgs> changes)
{
var changedGroupIds = new HashSet<IdentityPath>();
var peptideChanges = new Dictionary<IdentityPath, ChangedPeakBoundsEventArgs>();
foreach (var change in changes)
{
document = document.ChangePeak(change.GroupPath, change.NameSet, change.FilePath, change.Transition,
change.StartTime.MeasuredTime, change.EndTime.MeasuredTime, UserSet.TRUE, change.Identified, false);
changedGroupIds.Add(change.GroupPath);
if (!peptideChanges.ContainsKey(change.GroupPath.Parent)) {
var transitionGroup = (TransitionGroupDocNode) document.FindNode(change.GroupPath);
if (transitionGroup.RelativeRT == RelativeRT.Matching)
{
peptideChanges.Add(change.GroupPath.Parent, change);
}
}
}
// See if there are any other TransitionGroups that also have RelativeRT matching,
// and set their peak boundaries to the same.
foreach (var entry in peptideChanges)
{
var peptide = (PeptideDocNode) document.FindNode(entry.Key);
var change = entry.Value;
foreach (var transitionGroup in peptide.TransitionGroups)
{
if (transitionGroup.RelativeRT != RelativeRT.Matching)
{
continue;
}
var groupId = new IdentityPath(entry.Key, transitionGroup.TransitionGroup);
if (changedGroupIds.Contains(groupId))
{
continue;
}
if (null == FindChromInfo(document, transitionGroup, change.NameSet, change.FilePath))
{
continue;
}
document = document.ChangePeak(groupId, change.NameSet, change.FilePath, null,
change.StartTime.MeasuredTime, change.EndTime.MeasuredTime, UserSet.TRUE, change.Identified, true);
}
}
return document;
}
private void graphChromatogram_PickedSpectrum(object sender, PickedSpectrumEventArgs e)
{
if (_graphSpectrum == null || !_graphSpectrum.Visible)
{
ShowGraphSpectrum(true);
}
if (_graphSpectrum != null)
_graphSpectrum.SelectSpectrum(e.SpectrumId);
}
private void graphChromatogram_ZoomAll(object sender, ZoomEventArgs e)
{
foreach (var graphChrom in _listGraphChrom)
{
if (!ReferenceEquals(sender, graphChrom))
{
graphChrom.ZoomTo(e.ZoomState);
graphChrom.UpdateUI();
}
}
}
private void UpdateChromGraphs()
{
foreach (var graphChrom in _listGraphChrom)
graphChrom.UpdateUI();
// TODO(nicksh): we want to also update GraphSpectrum at this time, but there are issues with
// this being called reentrantly.
// if (null != GraphSpectrum)
// {
// GraphSpectrum.UpdateUI();
// }
}
private void closeAllChromatogramsMenuItem_Click(object sender, EventArgs e)
{
CloseAllChromatograms();
}
public void CloseAllChromatograms()
{
foreach (var graphChromatogram in _listGraphChrom.ToList())
{
graphChromatogram.Hide();
}
}
private void closeChromatogramMenuItem_Click(object sender, EventArgs e)
{
var graphChromatogram = _listGraphChrom.LastOrDefault(g => !g.IsHidden);
if (graphChromatogram != null)
{
graphChromatogram.Hide();
graphChromatogram = _listGraphChrom.LastOrDefault(g => !g.IsHidden);
graphChromatogram?.Activate();
}
}
#endregion
private void splitChromGraphMenuItem_Click(object sender, EventArgs e)
{
ShowSplitChromatogramGraph(!Settings.Default.SplitChromatogramGraph);
}
public void ShowSplitChromatogramGraph(bool split)
{
Settings.Default.SplitChromatogramGraph = split;
UpdateGraphPanes();
}
private void onlyQuantitativeMenuItem_Click(object sender, EventArgs e)
{
ShowOnlyQuantitative(!Settings.Default.ShowQuantitativeOnly);
}
public void ShowOnlyQuantitative(bool showOnlyQuantitative)
{
Settings.Default.ShowQuantitativeOnly = showOnlyQuantitative;
UpdateGraphPanes();
}
/// <summary>
/// Returns a rectangle suitable for positioning a floating DockableForm.
/// The size of the rectangle is based on the size of the DockPanel and the size of the screen.
/// </summary>
private Rectangle GetFloatingRectangleForNewWindow()
{
var rectFloat = dockPanel.Bounds;
rectFloat = dockPanel.RectangleToScreen(rectFloat);
rectFloat.X += rectFloat.Width / 4;
rectFloat.Y += rectFloat.Height / 3;
rectFloat.Width = Math.Max(600, rectFloat.Width / 2);
rectFloat.Height = Math.Max(440, rectFloat.Height / 2);
if (Program.SkylineOffscreen)
{
var offscreenPoint = GetOffscreenPoint();
rectFloat.X = offscreenPoint.X;
rectFloat.Y = offscreenPoint.Y;
}
else
{
// Make sure it is on the screen.
var screen = Screen.FromControl(dockPanel);
var rectScreen = screen.WorkingArea;
rectFloat.X = Math.Max(rectScreen.X, Math.Min(rectScreen.Width - rectFloat.Width, rectFloat.X));
rectFloat.Y = Math.Max(rectScreen.Y, Math.Min(rectScreen.Height - rectFloat.Height, rectFloat.Y));
}
return rectFloat;
}
private bool GraphVisible(IEnumerable<GraphSummary> graphs, GraphTypeSummary type)
{
return graphs.Any(g => g.Type == type && !g.IsHidden);
}
private bool GraphChecked(IEnumerable<GraphSummary> graphs, IList<GraphTypeSummary> types, GraphTypeSummary type)
{
return (types.Contains(type)) && GraphVisible(graphs, type);
}
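/// <summary>
/// Shows or hides a summary graph of the given type, docking it into the pane of
/// another visible summary graph when possible, or floating it in a new window.
/// </summary>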
private void ShowGraph(List<GraphSummary> graphs, bool show, GraphTypeSummary type,
Func<GraphTypeSummary, GraphSummary> createGraph)
{
var graph = graphs.FirstOrDefault(g => g.Type == type);
if (show)
{
if (graph != null && !Program.SkylineOffscreen)
{
graphs.Remove(graph);
graphs.Insert(0, graph);
graph.Controller.GraphTypes.Insert(0, type);
if (graphs.Count > 1 && !graphs[1].IsHidden)
graph.Show(FindPane(graphs[1]), null);
else
graph.Activate();
}
else
{
if (graph == null)
graph = createGraph(type);
if (graphs.Count > 1 && !graphs[1].IsHidden)
{
graph.Show(FindPane(graphs[1]), null);
}
else
{
// Choose a position to float the window
var rectFloat = GetFloatingRectangleForNewWindow();
graph.Show(dockPanel, rectFloat);
}
}
}
else if (graph != null)
{
graph.Hide();
}
}
#region Retention time graph
public GraphSummary GraphRetentionTime { get { return _listGraphRetentionTime.FirstOrDefault(); } }
public bool IsGraphRetentionTimeShown(GraphTypeSummary type)
{
return _listGraphRetentionTime.Any(g => g.Type == type && !g.IsHidden);
}
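// Shows only the currently enabled retention time graph types, while preserving
// the user's saved list of graph types for when they become enabled again.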
private void UpdateUIGraphRetentionTime(Func<GraphTypeSummary, bool> isEnabled)
{
var list = Settings.Default.RTGraphTypes.ToArray();
ShowGraphRetentionTime(isEnabled);
if (!list.All(isEnabled))
{
Settings.Default.RTGraphTypes.Clear();
Settings.Default.RTGraphTypes.AddRange(list);
}
}
public void ShowGraphRetentionTime(bool show)
{
ShowGraphRetentionTime(t => show && IsRetentionTimeGraphTypeEnabled(t));
}
private bool IsRetentionTimeGraphTypeEnabled(GraphTypeSummary type)
{
bool enabled = DocumentUI.Settings.HasResults;
switch (type)
{
// Can only do run to run regression with at least 2 replicates
case GraphTypeSummary.run_to_run_regression:
return enabled && DocumentUI.Settings.MeasuredResults.Chromatograms.Count > 1;
// Scheduling can be enabled with a predictor even if there are no results
case GraphTypeSummary.schedule:
return enabled || DocumentUI.Settings.PeptideSettings.Prediction.RetentionTime != null;
default:
return enabled;
}
}
private void ShowGraphRetentionTime(Func<GraphTypeSummary, bool> isEnabled)
{
Settings.Default.RTGraphTypes.ToList().ForEach(t =>
ShowGraphRetentionTime(isEnabled(t), t));
}
public void ShowGraphRetentionTime(bool show, GraphTypeSummary type)
{
ShowGraph(_listGraphRetentionTime, show, type, CreateGraphRetentionTime);
}
private GraphSummary CreateGraphRetentionTime(GraphTypeSummary type)
{
if (type == GraphTypeSummary.invalid)
return null;
var targetIndex = SelectedResultsIndex;
var origIndex = -1;
if (ComboResults != null && ComboResults.Items.Count > 0)
origIndex = (SelectedResultsIndex + 1) % ComboResults.Items.Count;
var graph = new GraphSummary(type, this, new RTGraphController(), targetIndex, origIndex);
graph.FormClosed += graphRetentionTime_FormClosed;
graph.VisibleChanged += graphRetentionTime_VisibleChanged;
graph.GraphControl.ZoomEvent += GraphControl_ZoomEvent;
graph.Toolbar = new RunToRunRegressionToolbar(graph);
_listGraphRetentionTime.Insert(0, graph);
return graph;
}
private void DestroyGraphRetentionTime(GraphSummary graph)
{
graph.FormClosed -= graphRetentionTime_FormClosed;
graph.VisibleChanged -= graphRetentionTime_VisibleChanged;
graph.HideOnClose = false;
graph.Close();
_listGraphRetentionTime.Remove(graph);
Settings.Default.RTGraphTypes.Remove(graph.Type);
}
private void graphRetentionTime_VisibleChanged(object sender, EventArgs e)
{
var graph = (GraphSummary) sender;
if (graph.Visible)
{
Settings.Default.RTGraphTypes.Insert(0, graph.Type);
_listGraphRetentionTime.Remove(graph);
_listGraphRetentionTime.Insert(0, graph);
}
else if (graph.IsHidden)
{
Settings.Default.RTGraphTypes.Remove(graph.Type);
}
}
private void graphRetentionTime_FormClosed(object sender, FormClosedEventArgs e)
{
GraphSummary graph = (GraphSummary)sender;
_listGraphRetentionTime.Remove(graph);
Settings.Default.RTGraphTypes.Remove(graph.Type);
}
void GraphSummary.IStateProvider.BuildGraphMenu(ZedGraphControl zedGraphControl, ContextMenuStrip menuStrip, Point mousePt,
GraphSummary.IController controller)
{
ContextMenuGraphSummary = controller.GraphSummary;
var graphController = controller as RTGraphController;
if (graphController != null)
BuildRTGraphMenu(controller.GraphSummary, menuStrip, mousePt, graphController);
else if (controller is AreaGraphController)
BuildAreaGraphMenu(controller.GraphSummary, menuStrip, mousePt);
else if (controller is MassErrorGraphController)
BuildMassErrorGraphMenu(controller.GraphSummary, menuStrip);
else if (controller is DetectionsGraphController)
BuildDetectionsGraphMenu(controller.GraphSummary, menuStrip);
CopyEmfToolStripMenuItem.AddToContextMenu(zedGraphControl, menuStrip);
}
public SrmDocument SelectionDocument
{
get { return SequenceTree != null ? SequenceTree.Document : null; }
}
public TreeNodeMS SelectedNode
{
get { return SequenceTree != null ? SequenceTree.SelectedNode as TreeNodeMS : null; }
}
public IdentityPath SelectedPath
{
get { return SequenceTree != null ? SequenceTree.SelectedPath : new IdentityPath(); }
set { SequenceTree.SelectedPath = value; }
}
public IList<TreeNodeMS> SelectedNodes
{
get { return SequenceTree != null ? SequenceTree.SelectedNodes.ToArray() : new TreeNodeMS[0]; }
}
public int SelectedResultsIndex
{
get { return ComboResults != null ? ComboResults.SelectedIndex : -1; }
set
{
if (ComboResults != null && 0 <= value && value < ComboResults.Items.Count)
{
var focusStart = User32.GetFocusedControl();
ComboResults.SelectedIndex = value;
if (focusStart != null)
{
// Avoid just setting focus back to the chromatogram graph
// that just lost activation and reactivating it.
if (IsChromatogramGraph(focusStart))
dockPanel.ActivePane.Focus();
else
focusStart.Focus();
}
}
}
}
/// <summary>
/// Returns true if a control is or belongs to a <see cref="GraphChromatogram"/>.
/// </summary>
private static bool IsChromatogramGraph(Control control)
{
while (control != null)
{
if (control is GraphChromatogram)
return true;
control = control.Parent;
}
return false;
}
public MsDataFileUri SelectedScanFile { get; set; }
public double SelectedScanRetentionTime { get; set; }
public Identity SelectedScanTransition { get; set; }
public void ActivateReplicate(string name)
{
int index;
ChromatogramSet chromatogramSet;
if (DocumentUI.Settings.MeasuredResults.TryGetChromatogramSet(name, out chromatogramSet, out index))
{
SelectedResultsIndex = index;
}
}
public void SelectPath(IdentityPath focusPath)
{
SequenceTree.SelectPath(focusPath);
UpdateGraphPanes();
}
public SpectrumDisplayInfo SelectedSpectrum
{
get { return _graphSpectrum != null ? _graphSpectrum.SelectedSpectrum : null; }
}
public void ActivateSpectrum()
{
ShowGraphSpectrum(true);
}
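// Builds the right-click menu for a retention time graph, inserting Skyline-specific
// items ahead of the ZedGraph defaults and removing ZedGraph items that do not apply.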
private void BuildRTGraphMenu(GraphSummary graph, ToolStrip menuStrip, Point mousePt, RTGraphController controller)
{
// Store original menuitems in an array, and insert a separator
ToolStripItem[] items = new ToolStripItem[menuStrip.Items.Count];
int iUnzoom = -1;
for (int i = 0; i < items.Length; i++)
{
items[i] = menuStrip.Items[i];
string tag = (string)items[i].Tag;
if (tag == @"unzoom")
iUnzoom = i;
}
if (iUnzoom != -1)
menuStrip.Items.Insert(iUnzoom, toolStripSeparator25);
// Insert skyline specific menus
var set = Settings.Default;
int iInsert = 0;
menuStrip.Items.Insert(iInsert++, timeGraphContextMenuItem);
if (timeGraphContextMenuItem.DropDownItems.Count == 0)
{
timeGraphContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
replicateComparisonContextMenuItem,
timePeptideComparisonContextMenuItem,
regressionContextMenuItem,
schedulingContextMenuItem
});
}
if (regressionContextMenuItem.DropDownItems.Count == 0)
{
regressionContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
scoreToRunToolStripMenuItem,
runToRunToolStripMenuItem
});
}
GraphTypeSummary graphType = graph.Type;
if (graphType == GraphTypeSummary.score_to_run_regression || graphType == GraphTypeSummary.run_to_run_regression)
{
var runToRun = graphType == GraphTypeSummary.run_to_run_regression;
menuStrip.Items.Insert(iInsert++, timePlotContextMenuItem);
if (timePlotContextMenuItem.DropDownItems.Count == 0)
{
timePlotContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
timeCorrelationContextMenuItem,
timeResidualsContextMenuItem
});
}
timeCorrelationContextMenuItem.Checked = RTGraphController.PlotType == PlotTypeRT.correlation;
timeResidualsContextMenuItem.Checked = RTGraphController.PlotType == PlotTypeRT.residuals;
menuStrip.Items.Insert(iInsert++, setRegressionMethodContextMenuItem);
if (setRegressionMethodContextMenuItem.DropDownItems.Count == 0)
{
setRegressionMethodContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
linearRegressionContextMenuItem,
kernelDensityEstimationContextMenuItem,
loessContextMenuItem
});
}
linearRegressionContextMenuItem.Checked = RTGraphController.RegressionMethod == RegressionMethodRT.linear;
kernelDensityEstimationContextMenuItem.Checked = RTGraphController.RegressionMethod == RegressionMethodRT.kde;
logRegressionContextMenuItem.Checked = RTGraphController.RegressionMethod == RegressionMethodRT.log;
loessContextMenuItem.Checked = RTGraphController.RegressionMethod == RegressionMethodRT.loess;
var showPointsTypeStandards = Document.GetRetentionTimeStandards().Any();
var showPointsTypeDecoys = Document.PeptideGroups.Any(nodePepGroup => nodePepGroup.Children.Cast<PeptideDocNode>().Any(nodePep => nodePep.IsDecoy));
var qvalues = Document.Settings.PeptideSettings.Integration.PeakScoringModel.IsTrained;
if (showPointsTypeStandards || showPointsTypeDecoys || qvalues)
{
menuStrip.Items.Insert(iInsert++, timePointsContextMenuItem);
if (timePointsContextMenuItem.DropDownItems.Count == 0)
{
timePointsContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
timeTargetsContextMenuItem,
timeStandardsContextMenuItem,
timeDecoysContextMenuItem
});
if (Document.Settings.HasResults &&
Document.Settings.PeptideSettings.Integration.PeakScoringModel.IsTrained)
{
timePointsContextMenuItem.DropDownItems.Insert(1, targetsAt1FDRToolStripMenuItem);
}
}
timeStandardsContextMenuItem.Visible = showPointsTypeStandards;
timeDecoysContextMenuItem.Visible = showPointsTypeDecoys;
timeTargetsContextMenuItem.Checked = RTGraphController.PointsType == PointsTypeRT.targets;
targetsAt1FDRToolStripMenuItem.Checked = RTGraphController.PointsType == PointsTypeRT.targets_fdr;
timeStandardsContextMenuItem.Checked = RTGraphController.PointsType == PointsTypeRT.standards;
timeDecoysContextMenuItem.Checked = RTGraphController.PointsType == PointsTypeRT.decoys;
}
refineRTContextMenuItem.Checked = set.RTRefinePeptides;
// Gray out so the user knows refinement is not possible with the current regression method
refineRTContextMenuItem.Enabled = RTGraphController.CanDoRefinementForRegressionMethod;
menuStrip.Items.Insert(iInsert++, refineRTContextMenuItem);
if (!runToRun)
{
predictionRTContextMenuItem.Checked = set.RTPredictorVisible;
menuStrip.Items.Insert(iInsert++, predictionRTContextMenuItem);
iInsert = AddReplicatesContextMenu(menuStrip, iInsert);
}
menuStrip.Items.Insert(iInsert++, setRTThresholdContextMenuItem);
if (!runToRun)
{
menuStrip.Items.Insert(iInsert++, toolStripSeparator22);
menuStrip.Items.Insert(iInsert++, createRTRegressionContextMenuItem);
menuStrip.Items.Insert(iInsert++, chooseCalculatorContextMenuItem);
if (chooseCalculatorContextMenuItem.DropDownItems.Count == 0)
{
chooseCalculatorContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
placeholderToolStripMenuItem1,
toolStripSeparatorCalculators,
addCalculatorContextMenuItem,
updateCalculatorContextMenuItem
});
}
}
var regressionRT = controller.RegressionRefined;
createRTRegressionContextMenuItem.Enabled = (regressionRT != null) && !runToRun;
updateCalculatorContextMenuItem.Visible = (regressionRT != null &&
Settings.Default.RTScoreCalculatorList.CanEditItem(regressionRT.Calculator) && !runToRun);
bool showDelete = controller.ShowDelete(mousePt);
bool showDeleteOutliers = controller.ShowDeleteOutliers;
if (showDelete || showDeleteOutliers)
{
menuStrip.Items.Insert(iInsert++, toolStripSeparator23);
if (showDelete)
menuStrip.Items.Insert(iInsert++, removeRTContextMenuItem);
if (showDeleteOutliers)
menuStrip.Items.Insert(iInsert++, removeRTOutliersContextMenuItem);
}
}
else if (graphType == GraphTypeSummary.schedule)
{
menuStrip.Items.Insert(iInsert++, toolStripSeparator38);
menuStrip.Items.Insert(iInsert++, timePropsContextMenuItem);
}
else
{
menuStrip.Items.Insert(iInsert++, toolStripSeparator16);
menuStrip.Items.Insert(iInsert++, rtValueMenuItem);
if (rtValueMenuItem.DropDownItems.Count == 0)
{
rtValueMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
allRTValueContextMenuItem,
timeRTValueContextMenuItem,
fwhmRTValueContextMenuItem,
fwbRTValueContextMenuItem
});
}
AddTransitionContextMenu(menuStrip, iInsert++);
if (graphType == GraphTypeSummary.replicate)
{
iInsert = AddReplicateOrderAndGroupByMenuItems(menuStrip, iInsert);
var rtReplicateGraphPane = graph.GraphPanes.FirstOrDefault() as RTReplicateGraphPane;
if (rtReplicateGraphPane != null && rtReplicateGraphPane.CanShowRTLegend)
{
showRTLegendContextMenuItem.Checked = set.ShowRetentionTimesLegend;
menuStrip.Items.Insert(iInsert++, showRTLegendContextMenuItem);
}
if (rtReplicateGraphPane != null)
{
ChromFileInfoId chromFileInfoId = null;
if (DocumentUI.Settings.HasResults)
{
var chromatogramSet = DocumentUI.Settings.MeasuredResults.Chromatograms[SelectedResultsIndex];
if (chromatogramSet.MSDataFileInfos.Count == 1)
{
chromFileInfoId = chromatogramSet.MSDataFileInfos[0].FileId;
}
}
iInsert = InsertAlignmentMenuItems(menuStrip.Items, chromFileInfoId, iInsert);
}
}
else if (graphType == GraphTypeSummary.peptide)
{
AddPeptideOrderContextMenu(menuStrip, iInsert++);
iInsert = AddReplicatesContextMenu(menuStrip, iInsert);
AddScopeContextMenu(menuStrip, iInsert++);
InsertAlignmentMenuItems(menuStrip.Items, null, iInsert);
}
if (graphType == GraphTypeSummary.peptide || null != SummaryReplicateGraphPane.GroupByReplicateAnnotation)
{
menuStrip.Items.Insert(iInsert++, peptideCvsContextMenuItem);
peptideCvsContextMenuItem.Checked = set.ShowPeptideCV;
}
selectionContextMenuItem.Checked = set.ShowReplicateSelection;
menuStrip.Items.Insert(iInsert++, selectionContextMenuItem);
synchronizeSummaryZoomingContextMenuItem.Checked = set.SynchronizeSummaryZooming;
menuStrip.Items.Insert(iInsert++, synchronizeSummaryZoomingContextMenuItem);
menuStrip.Items.Insert(iInsert++, toolStripSeparator38);
menuStrip.Items.Insert(iInsert++, timePropsContextMenuItem);
var isotopeLabelType = graph.GraphPaneFromPoint(mousePt) != null
? graph.GraphPaneFromPoint(mousePt).PaneKey.IsotopeLabelType
: null;
AddApplyRemovePeak(menuStrip, removePeakGraphMenuItem.DropDownItems, isotopeLabelType, -1, ref iInsert);
}
menuStrip.Items.Insert(iInsert, toolStripSeparator24);
// Remove some ZedGraph menu items not of interest
foreach (var item in items)
{
string tag = (string)item.Tag;
if (tag == @"set_default" || tag == @"show_val")
menuStrip.Items.Remove(item);
}
}
private void AddScopeContextMenu(ToolStrip menuStrip, int iInsert)
{
menuStrip.Items.Insert(iInsert, scopeContextMenuItem);
if (scopeContextMenuItem.DropDownItems.Count == 0)
{
scopeContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
documentScopeContextMenuItem,
proteinScopeContextMenuItem
});
}
}
private int AddReplicatesContextMenu(ToolStrip menuStrip, int iInsert)
{
if (DocumentUI.Settings.HasResults &&
DocumentUI.Settings.MeasuredResults.Chromatograms.Count > 1)
{
menuStrip.Items.Insert(iInsert++, replicatesRTContextMenuItem);
if (replicatesRTContextMenuItem.DropDownItems.Count == 0)
{
replicatesRTContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
averageReplicatesContextMenuItem,
singleReplicateRTContextMenuItem,
bestReplicateRTContextMenuItem
});
}
}
return iInsert;
}
private void AddPeptideOrderContextMenu(ToolStrip menuStrip, int iInsert)
{
menuStrip.Items.Insert(iInsert, peptideOrderContextMenuItem);
if (peptideOrderContextMenuItem.DropDownItems.Count == 0)
{
peptideOrderContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
peptideOrderDocumentContextMenuItem,
peptideOrderRTContextMenuItem,
peptideOrderAreaContextMenuItem
});
}
}
private void timeGraphMenuItem_DropDownOpening(object sender, EventArgs e)
{
var types = Settings.Default.RTGraphTypes;
bool runToRunRegression = GraphChecked(_listGraphRetentionTime, types, GraphTypeSummary.run_to_run_regression);
bool scoreToRunRegression = GraphChecked(_listGraphRetentionTime, types, GraphTypeSummary.score_to_run_regression);
runToRunToolStripMenuItem.Checked = runToRunRegression;
scoreToRunToolStripMenuItem.Checked = scoreToRunRegression;
runToRunMenuItem.Checked = runToRunRegression;
scoreToRunMenuItem.Checked = scoreToRunRegression;
regressionMenuItem.Checked = runToRunRegression || scoreToRunRegression;
regressionContextMenuItem.Checked = runToRunRegression || scoreToRunRegression;
replicateComparisonMenuItem.Checked = replicateComparisonContextMenuItem.Checked =
GraphChecked(_listGraphRetentionTime, types, GraphTypeSummary.replicate);
timePeptideComparisonMenuItem.Checked = timePeptideComparisonContextMenuItem.Checked =
GraphChecked(_listGraphRetentionTime, types, GraphTypeSummary.peptide);
schedulingMenuItem.Checked = schedulingContextMenuItem.Checked =
GraphChecked(_listGraphRetentionTime, types, GraphTypeSummary.schedule);
}
private void regressionMenuItem_Click(object sender, EventArgs e)
{
ShowRTRegressionGraphScoreToRun();
}
private void fullReplicateComparisonToolStripMenuItem_Click(object sender, EventArgs e)
{
ShowRTRegressionGraphRunToRun();
}
public void ShowRTRegressionGraphScoreToRun()
{
Settings.Default.RTGraphTypes.Insert(0, GraphTypeSummary.score_to_run_regression);
ShowGraphRetentionTime(true, GraphTypeSummary.score_to_run_regression);
UpdateRetentionTimeGraph();
}
public void ShowRTRegressionGraphRunToRun()
{
Settings.Default.RTGraphTypes.Insert(0, GraphTypeSummary.run_to_run_regression);
ShowGraphRetentionTime(true, GraphTypeSummary.run_to_run_regression);
UpdateRetentionTimeGraph();
}
private void linearRegressionContextMenuItem_Click(object sender, EventArgs e)
{
ShowRegressionMethod(RegressionMethodRT.linear);
}
private void kernelDensityEstimationContextMenuItem_Click(object sender, EventArgs e)
{
ShowRegressionMethod(RegressionMethodRT.kde);
}
private void logRegressionContextMenuItem_Click(object sender, EventArgs e)
{
ShowRegressionMethod(RegressionMethodRT.log);
}
private void loessContextMenuItem_Click(object sender, EventArgs e)
{
ShowRegressionMethod(RegressionMethodRT.loess);
}
private void timeCorrelationContextMenuItem_Click(object sender, EventArgs e)
{
ShowPlotType(PlotTypeRT.correlation);
}
private void timeResidualsContextMenuItem_Click(object sender, EventArgs e)
{
ShowPlotType(PlotTypeRT.residuals);
}
private void timeTargetsContextMenuItem_Click(object sender, EventArgs e)
{
ShowPointsType(PointsTypeRT.targets);
}
private void targetsAt1FDRToolStripMenuItem_Click(object sender, EventArgs e)
{
if (RTLinearRegressionGraphPane.ShowReplicate != ReplicateDisplay.single &&
RTGraphController.GraphType == GraphTypeSummary.score_to_run_regression)
{
using (var dlg = new MultiButtonMsgDlg(
Resources.SkylineWindow_targetsAt1FDRToolStripMenuItem_Click_Showing_targets_at_1__FDR_will_set_the_replicate_display_type_to_single__Do_you_want_to_continue_,
MultiButtonMsgDlg.BUTTON_YES, MultiButtonMsgDlg.BUTTON_NO, false))
{
if (dlg.ShowDialog(this) != DialogResult.Yes)
return;
}
}
ShowSingleReplicate();
ShowPointsType(PointsTypeRT.targets_fdr);
}
private void timeStandardsContextMenuItem_Click(object sender, EventArgs e)
{
ShowPointsType(PointsTypeRT.standards);
}
private void timeDecoysContextMenuItem_Click(object sender, EventArgs e)
{
ShowPointsType(PointsTypeRT.decoys);
}
public void ShowPlotType(PlotTypeRT plotTypeRT)
{
RTGraphController.PlotType = plotTypeRT;
UpdateRetentionTimeGraph();
}
public void ShowPointsType(PointsTypeRT pointsTypeRT)
{
RTGraphController.PointsType = pointsTypeRT;
UpdateRetentionTimeGraph();
}
public void ShowRegressionMethod(RegressionMethodRT regressionMethod)
{
RTGraphController.RegressionMethod = regressionMethod;
UpdateRetentionTimeGraph();
}
private void timePeptideComparisonMenuItem_Click(object sender, EventArgs e)
{
ShowRTPeptideGraph();
}
public void ShowRTPeptideGraph()
{
Settings.Default.RTGraphTypes.Insert(0, GraphTypeSummary.peptide);
ShowGraphRetentionTime(true, GraphTypeSummary.peptide);
UpdateRetentionTimeGraph();
SynchronizeSummaryZooming();
}
private void showRTLegendContextMenuItem_Click(object sender, EventArgs e)
{
ShowRTLegend(!Settings.Default.ShowRetentionTimesLegend);
}
public void ShowRTLegend(bool show)
{
Settings.Default.ShowRetentionTimesLegend = show;
UpdateRetentionTimeGraph();
}
private void replicateComparisonMenuItem_Click(object sender, EventArgs e)
{
ShowRTReplicateGraph();
}
public void ShowRTReplicateGraph()
{
Settings.Default.RTGraphTypes.Insert(0, GraphTypeSummary.replicate);
ShowGraphRetentionTime(true, GraphTypeSummary.replicate);
UpdateRetentionTimeGraph();
SynchronizeSummaryZooming();
}
private void schedulingMenuItem_Click(object sender, EventArgs e)
{
ShowRTSchedulingGraph();
}
public void ShowRTSchedulingGraph()
{
Settings.Default.RTGraphTypes.Insert(0, GraphTypeSummary.schedule);
ShowGraphRetentionTime(true, GraphTypeSummary.schedule);
UpdateRetentionTimeGraph();
}
private void selectionContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowReplicateSelection = selectionContextMenuItem.Checked;
UpdateSummaryGraphs();
}
private void refineRTContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.RTRefinePeptides = refineRTContextMenuItem.Checked;
UpdateRetentionTimeGraph();
}
private void predictionRTContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.RTPredictorVisible = predictionRTContextMenuItem.Checked;
UpdateRetentionTimeGraph();
}
private void averageReplicatesContextMenuItem_Click(object sender, EventArgs e)
{
ShowAverageReplicates();
}
public void ShowAverageReplicates()
{
Settings.Default.ShowRegressionReplicateEnum = ReplicateDisplay.all.ToString();
UpdateSummaryGraphs();
}
private void singleReplicateRTContextMenuItem_Click(object sender, EventArgs e)
{
ShowSingleReplicate();
}
public void ShowSingleReplicate()
{
Settings.Default.ShowRegressionReplicateEnum = ReplicateDisplay.single.ToString();
// No CVs with single replicate data views
Settings.Default.ShowPeptideCV = false;
UpdateSummaryGraphs();
}
private void bestReplicateRTContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowRegressionReplicateEnum = ReplicateDisplay.best.ToString();
// No CVs with single replicate data views
Settings.Default.ShowPeptideCV = false;
UpdateSummaryGraphs();
}
private void replicatesRTContextMenuItem_DropDownOpening(object sender, EventArgs e)
{
ReplicateDisplay replicate = RTLinearRegressionGraphPane.ShowReplicate;
averageReplicatesContextMenuItem.Checked = (replicate == ReplicateDisplay.all);
singleReplicateRTContextMenuItem.Checked = (replicate == ReplicateDisplay.single);
bestReplicateRTContextMenuItem.Checked = (replicate == ReplicateDisplay.best);
}
private void setRTThresholdContextMenuItem_Click(object sender, EventArgs e)
{
ShowRegressionRTThresholdDlg();
}
public void ShowRegressionRTThresholdDlg()
{
using (var dlg = new RegressionRTThresholdDlg {Threshold = Settings.Default.RTResidualRThreshold})
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
Settings.Default.RTResidualRThreshold = dlg.Threshold;
UpdateRetentionTimeGraph();
}
}
}
private void createRTRegressionContextMenuItem_Click(object sender, EventArgs e)
{
CreateRegression();
}
public void CreateRegression()
{
var listRegression = Settings.Default.RetentionTimeList;
var regression = RTGraphController.RegressionRefined;
string name = Path.GetFileNameWithoutExtension(DocumentFilePath);
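// Make the new regression name unique by appending the first unused numeric suffix.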
if (listRegression.ContainsKey(name))
{
int i = 2;
while (listRegression.ContainsKey(name + i))
i++;
name += i;
}
if (regression != null)
regression = (RetentionTimeRegression) regression.ChangeName(name);
using (var dlg = new EditRTDlg(listRegression) { Regression = regression })
{
dlg.ShowPeptides(true);
if (dlg.ShowDialog(this) == DialogResult.OK)
{
regression = dlg.Regression;
listRegression.Add(regression);
ModifyDocument(string.Format(Resources.SkylineWindow_CreateRegression_Set_regression__0__, regression.Name),
doc =>
doc.ChangeSettings(
doc.Settings.ChangePeptidePrediction(p => p.ChangeRetentionTime(regression))), AuditLogEntry.SettingsLogFunction);
}
}
}
private void chooseCalculatorContextMenuItem_DropDownOpening(object sender, EventArgs e)
{
SetupCalculatorChooser();
}
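// Rebuilds the calculator chooser: everything above toolStripSeparatorCalculators is
// regenerated from the current RTScoreCalculatorList, plus an "Auto" item that clears
// the explicit calculator choice.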
public void SetupCalculatorChooser()
{
while (!ReferenceEquals(chooseCalculatorContextMenuItem.DropDownItems[0], toolStripSeparatorCalculators))
chooseCalculatorContextMenuItem.DropDownItems.RemoveAt(0);
// If no calculator has been picked for use in the graph, get the best one.
var autoItem = new ToolStripMenuItem(Resources.SkylineWindow_SetupCalculatorChooser_Auto, null, delegate { ChooseCalculator(string.Empty); })
{
Checked = string.IsNullOrEmpty(Settings.Default.RTCalculatorName)
};
chooseCalculatorContextMenuItem.DropDownItems.Insert(0, autoItem);
int i = 1; // Start below the "Auto" item just inserted at index 0
foreach (var calculator in Settings.Default.RTScoreCalculatorList)
{
string calculatorName = calculator.Name;
var menuItem = new ToolStripMenuItem(calculatorName, null, delegate { ChooseCalculator(calculatorName);})
{
Checked = Equals(calculatorName, Settings.Default.RTCalculatorName)
};
chooseCalculatorContextMenuItem.DropDownItems.Insert(i++, menuItem);
}
}
public void ChooseCalculator(string calculatorName)
{
Settings.Default.RTCalculatorName = calculatorName;
UpdateRetentionTimeGraph();
}
private void addCalculatorContextMenuItem_Click(object sender, EventArgs e)
{
var list = Settings.Default.RTScoreCalculatorList;
var calcNew = list.EditItem(this, null, list, null);
if (calcNew != null)
list.SetValue(calcNew);
}
private void updateCalculatorContextMenuItem_Click(object sender, EventArgs e)
{
ShowEditCalculatorDlg();
}
public void ShowEditCalculatorDlg()
{
var list = Settings.Default.RTScoreCalculatorList;
var regressionRT = RTGraphController.RegressionRefined;
if (regressionRT != null && list.CanEditItem(regressionRT.Calculator))
{
var calcOld = regressionRT.Calculator;
var calcNew = list.EditItem(this, calcOld, list, null);
if (calcNew != null && !Equals(calcNew, calcOld))
{
list.SetValue(calcNew);
var regressionRTDoc = DocumentUI.Settings.PeptideSettings.Prediction.RetentionTime;
if (regressionRTDoc != null && Equals(calcOld.Name, regressionRTDoc.Calculator.Name) &&
!Equals(calcNew, regressionRTDoc.Calculator))
{
ModifyDocument(string.Format(Resources.SkylineWindow_ShowEditCalculatorDlg_Update__0__calculator, calcNew.Name), doc =>
doc.ChangeSettings(doc.Settings.ChangePeptidePrediction(predict =>
predict.ChangeRetentionTime(predict.RetentionTime.ChangeCalculator(calcNew)))), AuditLogEntry.SettingsLogFunction);
}
}
}
}
private void removeRTOutliersContextMenuItem_Click(object sender, EventArgs e)
{
RemoveRTOutliers();
}
public void RemoveRTOutliers()
{
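// Collect the global indices of the current outliers and remove them in a single
// document modification, so the deletion is undoable as one audit log entry.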
var outliers = RTGraphController.Outliers;
var outlierIds = new HashSet<int>();
foreach (var outlier in outliers)
outlierIds.Add(outlier.Id.GlobalIndex);
ModifyDocument(Resources.SkylineWindow_RemoveRTOutliers_Remove_retention_time_outliers,
doc => (SrmDocument) doc.RemoveAll(outlierIds),
docPair => AuditLogEntry.CreateCountChangeEntry(MessageType.removed_rt_outlier,
MessageType.removed_rt_outliers, docPair.OldDocumentType, RTGraphController.Outliers, outlier => MessageArgs.Create(AuditLogEntry.GetNodeName(docPair.OldDoc, outlier)), null));
}
private void removeRTContextMenuItem_Click(object sender, EventArgs e)
{
deleteMenuItem_Click(sender, e);
}
private void peptideRTValueMenuItem_DropDownOpening(object sender, EventArgs e)
{
RTPeptideValue rtValue = RTPeptideGraphPane.RTValue;
allRTValueContextMenuItem.Checked = (rtValue == RTPeptideValue.All);
timeRTValueContextMenuItem.Checked = (rtValue == RTPeptideValue.Retention);
fwhmRTValueContextMenuItem.Checked = (rtValue == RTPeptideValue.FWHM);
fwbRTValueContextMenuItem.Checked = (rtValue == RTPeptideValue.FWB);
}
/// <summary>
/// If the predicted retention time is auto-calculated, adds a "Show {Prediction} score" menu item.
/// If retention time alignments are available for the specified chromFileInfoId, also adds an
/// "Align Times To {Specified File}" menu item to the context menu.
/// </summary>
private int InsertAlignmentMenuItems(ToolStripItemCollection items, ChromFileInfoId chromFileInfoId, int iInsert)
{
var predictRT = Document.Settings.PeptideSettings.Prediction.RetentionTime;
if (predictRT != null && predictRT.IsAutoCalculated)
{
var menuItem = new ToolStripMenuItem(string.Format(Resources.SkylineWindow_ShowCalculatorScoreFormat, predictRT.Calculator.Name), null,
(sender, eventArgs)=>AlignToRtPrediction=!AlignToRtPrediction)
{
Checked = AlignToRtPrediction,
};
items.Insert(iInsert++, menuItem);
}
if (null != chromFileInfoId && DocumentUI.Settings.HasResults &&
!DocumentUI.Settings.DocumentRetentionTimes.FileAlignments.IsEmpty)
{
foreach (var chromatogramSet in DocumentUI.Settings.MeasuredResults.Chromatograms)
{
var chromFileInfo = chromatogramSet.MSDataFileInfos
.FirstOrDefault(
chromFileInfoMatch =>
ReferenceEquals(chromFileInfoMatch.FileId, chromFileInfoId));
if (null == chromFileInfo)
{
continue;
}
string fileItemName = Path.GetFileNameWithoutExtension(SampleHelp.GetFileName(chromFileInfo.FilePath));
var menuItemText = string.Format(Resources.SkylineWindow_AlignTimesToFileFormat, fileItemName);
var alignToFileItem = new ToolStripMenuItem(menuItemText);
if (ReferenceEquals(chromFileInfoId, AlignToFile))
{
alignToFileItem.Click += (sender, eventArgs) => AlignToFile = null;
alignToFileItem.Checked = true;
}
else
{
alignToFileItem.Click += (sender, eventArgs) => AlignToFile = chromFileInfoId;
alignToFileItem.Checked = false;
}
items.Insert(iInsert++, alignToFileItem);
}
}
return iInsert;
}
private void allRTValueContextMenuItem_Click(object sender, EventArgs e)
{
// No CVs with all retention time values showing
Settings.Default.ShowPeptideCV = false;
ShowRTPeptideValue(RTPeptideValue.All);
}
private void timeRTValueContextMenuItem_Click(object sender, EventArgs e)
{
ShowRTPeptideValue(RTPeptideValue.Retention);
}
private void fwhmRTValueContextMenuItem_Click(object sender, EventArgs e)
{
ShowRTPeptideValue(RTPeptideValue.FWHM);
}
private void fwbRTValueContextMenuItem_Click(object sender, EventArgs e)
{
ShowRTPeptideValue(RTPeptideValue.FWB);
}
public void ShowRTPeptideValue(RTPeptideValue value)
{
Settings.Default.RTPeptideValue = value.ToString();
UpdateRetentionTimeGraph();
}
private void timePropsContextMenuItem_Click(object sender, EventArgs e)
{
ShowRTPropertyDlg(ContextMenuGraphSummary);
}
public void ShowRTPropertyDlg(GraphSummary graph)
{
if (graph.Type == GraphTypeSummary.schedule)
{
using (var dlg = new SchedulingGraphPropertyDlg())
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
UpdateRetentionTimeGraph();
}
}
}
else
{
using (var dlg = new RTChartPropertyDlg())
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
UpdateSummaryGraphs();
}
}
}
}
public void UpdateRetentionTimeGraph()
{
_listGraphRetentionTime.ForEach(g =>
{
try
{
g.UpdateUI();
}
catch (CalculatorException e)
{
MessageDlg.ShowException(this, e);
Settings.Default.RTCalculatorName = string.Empty;
}
});
}
private void retentionTimeAlignmentToolStripMenuItem_Click(object sender, EventArgs e)
{
ShowRetentionTimeAlignmentForm();
}
public AlignmentForm ShowRetentionTimeAlignmentForm()
{
var form = FormUtil.OpenForms.OfType<AlignmentForm>().FirstOrDefault();
if (form == null)
{
form = new AlignmentForm(this);
form.Show(this);
}
else
{
form.Activate();
}
return form;
}
#endregion
#region Peak area graph
public GraphSummary GraphPeakArea { get { return _listGraphPeakArea.FirstOrDefault(); } }
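// Shows or hides all peak area graphs while preserving the graph type list.
// Hiding a graph removes its type from Settings.Default.AreaGraphTypes (see
// graphPeakArea_VisibleChanged), so the list is snapshotted and restored here.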
public void UpdateUIGraphPeakArea(bool visible)
{
var list = Settings.Default.AreaGraphTypes.ToArray();
ShowGraphPeakArea(visible);
if (!visible)
{
Settings.Default.AreaGraphTypes.Clear();
Settings.Default.AreaGraphTypes.AddRange(list);
}
}
public void ShowGraphPeakArea(bool show)
{
Settings.Default.AreaGraphTypes.ToList().ForEach(t => ShowGraphPeakArea(show, t));
}
public void ShowGraphPeakArea(bool show, GraphTypeSummary type)
{
ShowGraph(_listGraphPeakArea, show, type, CreateGraphPeakArea);
}
private GraphSummary CreateGraphPeakArea(GraphTypeSummary type)
{
if (type == GraphTypeSummary.invalid)
return null;
GraphSummary graph = new GraphSummary(type, this, new AreaGraphController(), SelectedResultsIndex);
graph.FormClosed += graphPeakArea_FormClosed;
graph.VisibleChanged += graphPeakArea_VisibleChanged;
graph.GraphControl.ZoomEvent += GraphControl_ZoomEvent;
graph.Toolbar = new AreaCVToolbar(graph);
_listGraphPeakArea.Insert(0, graph);
return graph;
}
private void DestroyGraphPeakArea(GraphSummary graph)
{
graph.FormClosed -= graphPeakArea_FormClosed;
graph.VisibleChanged -= graphPeakArea_VisibleChanged;
graph.HideOnClose = false;
graph.Close();
_listGraphPeakArea.Remove(graph);
Settings.Default.AreaGraphTypes.Remove(graph.Type);
}
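// Keep the most recently shown graph first in both the settings list and
// _listGraphPeakArea, so that GraphPeakArea returns the active graph.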
private void graphPeakArea_VisibleChanged(object sender, EventArgs e)
{
var graph = (GraphSummary)sender;
if (graph.Visible)
{
Settings.Default.AreaGraphTypes.Insert(0, graph.Type);
_listGraphPeakArea.Remove(graph);
_listGraphPeakArea.Insert(0, graph);
}
else if (graph.IsHidden)
{
Settings.Default.AreaGraphTypes.Remove(graph.Type);
}
}
private void graphPeakArea_FormClosed(object sender, FormClosedEventArgs e)
{
GraphSummary graph = (GraphSummary)sender;
_listGraphPeakArea.Remove(graph);
Settings.Default.AreaGraphTypes.Remove(graph.Type);
}
private void BuildAreaGraphMenu(GraphSummary graphSummary, ToolStrip menuStrip, Point mousePt)
{
// Store the original menu items in an array, and insert a separator
ToolStripItem[] items = new ToolStripItem[menuStrip.Items.Count];
int iUnzoom = -1;
for (int i = 0; i < items.Length; i++)
{
items[i] = menuStrip.Items[i];
string tag = (string)items[i].Tag;
if (tag == @"unzoom")
iUnzoom = i;
}
if (iUnzoom != -1)
menuStrip.Items.Insert(iUnzoom, toolStripSeparator25);
// Insert Skyline-specific menus
var set = Settings.Default;
int iInsert = 0;
menuStrip.Items.Insert(iInsert++, areaGraphContextMenuItem);
if (areaGraphContextMenuItem.DropDownItems.Count == 0)
{
areaGraphContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
areaReplicateComparisonContextMenuItem,
areaPeptideComparisonContextMenuItem,
areaCVHistogramContextMenuItem,
areaCVHistogram2DContextMenuItem
});
}
var graphType = graphSummary.Type;
if (graphType == GraphTypeSummary.replicate)
{
menuStrip.Items.Insert(iInsert++, graphTypeToolStripMenuItem);
if (graphTypeToolStripMenuItem.DropDownItems.Count == 0)
{
graphTypeToolStripMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
barAreaGraphDisplayTypeMenuItem,
lineAreaGraphDisplayTypeMenuItem
});
}
}
menuStrip.Items.Insert(iInsert++, toolStripSeparator16);
var isHistogram = graphType == GraphTypeSummary.histogram || graphType == GraphTypeSummary.histogram2d;
if (isHistogram)
AddGroupByMenuItems(menuStrip, groupReplicatesByContextMenuItem, SetAreaCVGroup, true, AreaGraphController.GroupByGroup, ref iInsert);
else
AddTransitionContextMenu(menuStrip, iInsert++);
if (graphType == GraphTypeSummary.replicate)
{
iInsert = AddReplicateOrderAndGroupByMenuItems(menuStrip, iInsert);
areaNormalizeTotalContextMenuItem.Checked =
(AreaGraphController.AreaView == AreaNormalizeToView.area_percent_view);
menuStrip.Items.Insert(iInsert++, areaNormalizeContextMenuItem);
if (areaNormalizeContextMenuItem.DropDownItems.Count == 0)
{
areaNormalizeContextMenuItem.DropDownItems.AddRange(new[]
{
areaNormalizeGlobalContextMenuItem,
areaNormalizeMaximumContextMenuItem,
areaNormalizeTotalContextMenuItem,
(ToolStripItem)toolStripSeparator40,
areaNormalizeNoneContextMenuItem
});
}
var areaReplicateGraphPane = graphSummary.GraphPanes.FirstOrDefault() as AreaReplicateGraphPane;
if (areaReplicateGraphPane != null)
{
// If the area replicate graph is being displayed and it shows a legend,
// display the "Legend" option
if (areaReplicateGraphPane.CanShowPeakAreaLegend)
{
showPeakAreaLegendContextMenuItem.Checked = set.ShowPeakAreaLegend;
menuStrip.Items.Insert(iInsert++, showPeakAreaLegendContextMenuItem);
}
// If the area replicate graph is being displayed and it can show a library,
// display the "Show Library" option
var expectedVisible = areaReplicateGraphPane.ExpectedVisible;
if (expectedVisible != AreaExpectedValue.none)
{
showLibraryPeakAreaContextMenuItem.Checked = set.ShowLibraryPeakArea;
showLibraryPeakAreaContextMenuItem.Text = expectedVisible == AreaExpectedValue.library
? Resources.SkylineWindow_BuildAreaGraphMenu_Show_Library
: Resources.SkylineWindow_BuildAreaGraphMenu_Show_Expected;
menuStrip.Items.Insert(iInsert++, showLibraryPeakAreaContextMenuItem);
}
// If the area replicate graph is being displayed and it can show dot products,
// display the "Show Dot Product" option
if (areaReplicateGraphPane.CanShowDotProduct)
{
showDotProductToolStripMenuItem.Checked = set.ShowDotProductPeakArea;
menuStrip.Items.Insert(iInsert++, showDotProductToolStripMenuItem);
}
}
}
else if (graphType == GraphTypeSummary.peptide)
{
AddPeptideOrderContextMenu(menuStrip, iInsert++);
iInsert = AddReplicatesContextMenu(menuStrip, iInsert);
AddScopeContextMenu(menuStrip, iInsert++);
}
if (isHistogram)
{
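// The targets/decoys points menu is only meaningful when the peak scoring model
// is trained or uses decoys.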
bool trained = Document.Settings.PeptideSettings.Integration.PeakScoringModel.IsTrained;
bool decoys = Document.Settings.PeptideSettings.Integration.PeakScoringModel.UsesDecoys;
if (trained || decoys)
{
UpdateAreaPointsTypeMenuItems();
if (pointsToolStripMenuItem.DropDownItems.Count == 0)
{
pointsToolStripMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
areaCVtargetsToolStripMenuItem,
areaCVdecoysToolStripMenuItem
});
}
menuStrip.Items.Insert(iInsert++, pointsToolStripMenuItem);
}
UpdateAreaCVTransitionsMenuItems();
if (areaCVTransitionsToolStripMenuItem.DropDownItems.Count == 0)
{
areaCVTransitionsToolStripMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
areaCVAllTransitionsToolStripMenuItem,
areaCVCountTransitionsToolStripMenuItem,
areaCVBestTransitionsToolStripMenuItem,
toolStripSeparator58,
areaCVPrecursorsToolStripMenuItem,
areaCVProductsToolStripMenuItem
});
}
if (areaCVCountTransitionsToolStripMenuItem.DropDownItems.Count == 0)
{
var maxTransCount = Document.MoleculeTransitionGroups
.Select(g => g.TransitionCount).Append(0).Max();
for (int i = 1; i <= maxTransCount; i++)
{
var tmp = new ToolStripMenuItem(i.ToString(), null,
areaCVCountTransitionsToolStripMenuItem_Click)
{
Checked = AreaGraphController.AreaCVTransitionsCount == i
};
areaCVCountTransitionsToolStripMenuItem.DropDownItems.Add(tmp);
}
}
menuStrip.Items.Insert(iInsert++, areaCVTransitionsToolStripMenuItem);
UpdateAreaBinWidthMenuItems();
if (areaCVbinWidthToolStripMenuItem.DropDownItems.Count == 0)
{
areaCVbinWidthToolStripMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
areaCV05binWidthToolStripMenuItem,
areaCV10binWidthToolStripMenuItem,
areaCV15binWidthToolStripMenuItem,
areaCV20binWidthToolStripMenuItem
});
}
menuStrip.Items.Insert(iInsert++, areaCVbinWidthToolStripMenuItem);
areaCVNormalizedToToolStripMenuItem.DropDownItems.Clear();
UpdateAreaNormalizationMenuItems();
areaCVNormalizedToToolStripMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
areaCVGlobalStandardsToolStripMenuItem,
areaCVMediansToolStripMenuItem,
toolStripSeparator54,
areaCVNoneToolStripMenuItem
});
menuStrip.Items.Insert(iInsert++, areaCVNormalizedToToolStripMenuItem);
if (graphType == GraphTypeSummary.histogram2d)
{
areaCVLogScaleToolStripMenuItem.Checked = Settings.Default.AreaCVLogScale;
menuStrip.Items.Insert(iInsert++, areaCVLogScaleToolStripMenuItem);
}
selectionContextMenuItem.Checked = set.ShowReplicateSelection;
menuStrip.Items.Insert(iInsert++, selectionContextMenuItem);
menuStrip.Items.Insert(iInsert++, toolStripSeparator57);
menuStrip.Items.Insert(iInsert++, removeAboveCVCutoffToolStripMenuItem);
}
else
{
if (graphType == GraphTypeSummary.peptide || !string.IsNullOrEmpty(Settings.Default.GroupByReplicateAnnotation))
{
menuStrip.Items.Insert(iInsert++, peptideCvsContextMenuItem);
peptideCvsContextMenuItem.Checked = set.ShowPeptideCV;
}
menuStrip.Items.Insert(iInsert++, peptideLogScaleContextMenuItem);
peptideLogScaleContextMenuItem.Checked = set.AreaLogScale;
selectionContextMenuItem.Checked = set.ShowReplicateSelection;
menuStrip.Items.Insert(iInsert++, selectionContextMenuItem);
synchronizeSummaryZoomingContextMenuItem.Checked = set.SynchronizeSummaryZooming;
menuStrip.Items.Insert(iInsert++, synchronizeSummaryZoomingContextMenuItem);
}
menuStrip.Items.Insert(iInsert++, toolStripSeparator24);
menuStrip.Items.Insert(iInsert++, areaPropsContextMenuItem);
menuStrip.Items.Insert(iInsert, toolStripSeparator28);
if (!isHistogram)
{
var paneFromPoint = graphSummary.GraphPaneFromPoint(mousePt);
var isotopeLabelType = paneFromPoint != null
? paneFromPoint.PaneKey.IsotopeLabelType
: null;
AddApplyRemovePeak(menuStrip, removePeakGraphMenuItem.DropDownItems, isotopeLabelType, -1, ref iInsert);
}
// Remove some ZedGraph menu items not of interest
foreach (var item in items)
{
string tag = (string)item.Tag;
if (tag == @"set_default" || tag == @"show_val")
menuStrip.Items.Remove(item);
}
}
private void UpdateAreaCVTransitionsMenuItems()
{
areaCVAllTransitionsToolStripMenuItem.Checked = AreaGraphController.AreaCVTransitions == AreaCVTransitions.all;
areaCVBestTransitionsToolStripMenuItem.Checked = AreaGraphController.AreaCVTransitions == AreaCVTransitions.best;
var selectedCount = AreaGraphController.AreaCVTransitionsCount;
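// The count items are 1-based transition counts, so item i corresponds to a count of i + 1.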
for (int i = 0; i < areaCVCountTransitionsToolStripMenuItem.DropDownItems.Count; i++)
{
((ToolStripMenuItem)areaCVCountTransitionsToolStripMenuItem.DropDownItems[i]).Checked =
selectedCount - 1 == i;
}
areaCVPrecursorsToolStripMenuItem.Checked = AreaGraphController.AreaCVMsLevel == AreaCVMsLevel.precursors;
areaCVProductsToolStripMenuItem.Checked = AreaGraphController.AreaCVMsLevel == AreaCVMsLevel.products;
}
private void areaCVAllTransitionsToolStripMenuItem_Click(object sender, EventArgs e)
{
SetAreaCVTransitions(AreaCVTransitions.all, -1);
}
private void areaCVCountTransitionsToolStripMenuItem_Click(object sender, EventArgs e)
{
var item = (ToolStripMenuItem)sender;
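// Count items are created in order starting at a count of 1, so the item's
// 0-based index in the drop down converts to a count by adding 1.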
int selectedIdx = ((ToolStripMenuItem)item.OwnerItem).DropDownItems.IndexOf(item) + 1;
SetAreaCVTransitions(AreaCVTransitions.count, selectedIdx);
}
private void areaCVBestTransitionsToolStripMenuItem_Click(object sender, EventArgs e)
{
SetAreaCVTransitions(AreaCVTransitions.best, -1);
}
public void SetAreaCVTransitions(AreaCVTransitions transitions, int count)
{
AreaGraphController.AreaCVTransitionsCount = count;
AreaGraphController.AreaCVTransitions = transitions;
UpdatePeakAreaGraph();
}
private void areaCVPrecursorsToolStripMenuItem_Click(object sender, EventArgs e)
{
SetAreaCVMsLevel(AreaCVMsLevel.precursors);
}
private void areaCVProductsToolStripMenuItem_Click(object sender, EventArgs e)
{
SetAreaCVMsLevel(AreaCVMsLevel.products);
}
public void SetAreaCVMsLevel(AreaCVMsLevel msLevel)
{
AreaGraphController.AreaCVMsLevel = msLevel;
UpdatePeakAreaGraph();
}
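// Bin width choices correspond to 0.5, 1.0, 1.5 and 2.0 CV percentage points,
// displayed and stored in the current unit via the CV-to-percentage factor.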
private void UpdateAreaBinWidthMenuItems()
{
var factor = AreaGraphController.GetAreaCVFactorToPercentage();
var unit = Settings.Default.AreaCVShowDecimals ? string.Empty : @"%";
areaCV05binWidthToolStripMenuItem.Text = 0.5 / factor + unit;
areaCV10binWidthToolStripMenuItem.Text = 1.0 / factor + unit;
areaCV15binWidthToolStripMenuItem.Text = 1.5 / factor + unit;
areaCV20binWidthToolStripMenuItem.Text = 2.0 / factor + unit;
var binwidth = Settings.Default.AreaCVHistogramBinWidth;
areaCV05binWidthToolStripMenuItem.Checked = binwidth == 0.5 / factor;
areaCV10binWidthToolStripMenuItem.Checked = binwidth == 1.0 / factor;
areaCV15binWidthToolStripMenuItem.Checked = binwidth == 1.5 / factor;
areaCV20binWidthToolStripMenuItem.Checked = binwidth == 2.0 / factor;
}
private void barAreaGraphTypeMenuItem_Click(object sender, EventArgs e)
{
SetAreaGraphDisplayType(AreaGraphDisplayType.bars);
}
private void lineAreaGraphTypeMenuItem_Click(object sender, EventArgs e)
{
SetAreaGraphDisplayType(AreaGraphDisplayType.lines);
}
public void SetAreaGraphDisplayType(AreaGraphDisplayType displayType)
{
AreaGraphController.GraphDisplayType = displayType;
barAreaGraphDisplayTypeMenuItem.Checked = (displayType == AreaGraphDisplayType.bars);
lineAreaGraphDisplayTypeMenuItem.Checked = (displayType == AreaGraphDisplayType.lines);
UpdatePeakAreaGraph();
}
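// Applies the zoom of the active summary graph to all other visible summary graphs
// of the same type. Panes that show the expected/library value have their x values
// offset by one, so the zoom range is shifted to keep replicates aligned.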
public void SynchronizeSummaryZooming(GraphSummary graphSummary = null, ZoomState zoomState = null)
{
var activeGraphSummary = graphSummary ?? dockPanel.ActiveContent as GraphSummary;
if (!Settings.Default.SynchronizeSummaryZooming || activeGraphSummary == null)
{
return;
}
var activePane = activeGraphSummary.GraphControl.GraphPane;
GraphSummary[] graphSummaries = new List<GraphSummary>(_listGraphMassError.Concat(_listGraphPeakArea.Concat(_listGraphRetentionTime))).ToArray();
// Find the correct GraphSummary
int index = graphSummaries.IndexOf(g => ReferenceEquals(g, activeGraphSummary));
// If zoomState is null, we use the current state of the active pane
var xScaleState = zoomState == null ? new ScaleState(activePane.XAxis) : zoomState.XAxis;
var x2ScaleState = zoomState == null ? new ScaleState(activePane.X2Axis) : zoomState.X2Axis;
double add = 0.0;
// If the expected value (library) is visible, the zoom has to be shifted
var activeAreaPane = activePane as AreaReplicateGraphPane;
if (activeAreaPane != null && activeAreaPane.IsExpectedVisible)
add = -1.0;
for (int i = 0; i < graphSummaries.Length; ++i)
{
// Make sure we are not syncing the same graph or graphs of different types
if (i != index && graphSummaries[i] != null && graphSummaries[i].Type == graphSummaries[index].Type && graphSummaries[i].Visible)
{
var areaPane = graphSummaries[i].GraphControl.GraphPane as AreaReplicateGraphPane;
bool isExpectedVisible = areaPane != null && areaPane.IsExpectedVisible;
if (isExpectedVisible)
++add;
graphSummaries[i].GraphControl.GraphPane.XAxis.Scale.Min = xScaleState.Min + add;
graphSummaries[i].GraphControl.GraphPane.XAxis.Scale.Max = xScaleState.Max + add;
graphSummaries[i].GraphControl.GraphPane.X2Axis.Scale.Min = x2ScaleState.Min + add;
graphSummaries[i].GraphControl.GraphPane.X2Axis.Scale.Max = x2ScaleState.Max + add;
if (isExpectedVisible)
--add;
graphSummaries[i].UpdateUI(false);
}
}
}
void synchronizeSummaryZoomingContextMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.SynchronizeSummaryZooming = synchronizeSummaryZoomingContextMenuItem.Checked;
SynchronizeSummaryZooming();
}
void GraphControl_ZoomEvent(ZedGraphControl sender, ZoomState oldState, ZoomState newState, PointF mousePosition)
{
// We pass in a GraphSummary here because sometimes dockPanel.ActiveContent is not the graph the user is zooming in on
GraphSummary[] graphSummaries = new List<GraphSummary>(_listGraphMassError.Concat(_listGraphPeakArea.Concat(_listGraphRetentionTime))).ToArray();
SynchronizeSummaryZooming(graphSummaries.FirstOrDefault(gs => gs != null && ReferenceEquals(gs.GraphControl, sender)), newState);
}
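// Populates a group-by drop down from the document's groupable replicate values;
// when includeAll is true, an "All Replicates" item clears the grouping.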
private void AddGroupByMenuItems(ToolStrip menuStrip, ToolStripDropDownItem item, Action<ReplicateValue> clickHandler, bool includeAll, string checkedValue, ref int iInsert)
{
var replicateValues = ReplicateValue.GetGroupableReplicateValues(Document).ToArray();
if (!replicateValues.Any())
{
return;
}
item.DropDownItems.Clear();
if (includeAll)
{
item.DropDownItems.Add(new ToolStripMenuItem(Resources.SkylineWindow_AddGroupByMenuItems_All_Replicates,
null, (sender, args)=>clickHandler(null)) {Checked = string.IsNullOrEmpty(checkedValue)});
}
foreach (var g in replicateValues)
{
var subItem = new ToolStripMenuItem(g.Title, null, (sender, args)=>clickHandler(g))
{
Checked = checkedValue == g.ToPersistedString(),
};
item.DropDownItems.Add(subItem);
}
menuStrip?.Items.Insert(iInsert++, item);
}
public void SetGroupApplyToBy(ReplicateValue replicateValue)
{
Settings.Default.GroupApplyToBy = replicateValue?.ToPersistedString();
}
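// Shows the replicate "Order By" menu only when replicates are not grouped by an
// annotation, then adds the "Group By" menu when groupable values exist.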
private int AddReplicateOrderAndGroupByMenuItems(ToolStrip menuStrip, int iInsert)
{
string currentGroupBy = SummaryReplicateGraphPane.GroupByReplicateAnnotation;
var groupByValues = ReplicateValue.GetGroupableReplicateValues(DocumentUI).ToArray();
if (groupByValues.Length == 0)
currentGroupBy = null;
// If not grouped by an annotation, show the order-by menu item
if (string.IsNullOrEmpty(currentGroupBy))
{
var orderByReplicateAnnotationDef = groupByValues.FirstOrDefault(
value => SummaryReplicateGraphPane.OrderByReplicateAnnotation == value.ToPersistedString());
menuStrip.Items.Insert(iInsert++, replicateOrderContextMenuItem);
replicateOrderContextMenuItem.DropDownItems.Clear();
replicateOrderContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
replicateOrderDocumentContextMenuItem,
replicateOrderAcqTimeContextMenuItem
});
replicateOrderDocumentContextMenuItem.Checked
= null == orderByReplicateAnnotationDef &&
SummaryReplicateOrder.document == SummaryReplicateGraphPane.ReplicateOrder;
replicateOrderAcqTimeContextMenuItem.Checked
= null == orderByReplicateAnnotationDef &&
SummaryReplicateOrder.time == SummaryReplicateGraphPane.ReplicateOrder;
foreach (var replicateValue in groupByValues)
{
replicateOrderContextMenuItem.DropDownItems.Add(OrderByReplicateAnnotationMenuItem(
replicateValue, SummaryReplicateGraphPane.OrderByReplicateAnnotation));
}
}
if (groupByValues.Length > 0)
{
menuStrip.Items.Insert(iInsert++, groupReplicatesByContextMenuItem);
groupReplicatesByContextMenuItem.DropDownItems.Clear();
groupReplicatesByContextMenuItem.DropDownItems.Add(groupByReplicateContextMenuItem);
groupByReplicateContextMenuItem.Checked = string.IsNullOrEmpty(currentGroupBy);
foreach (var replicateValue in groupByValues)
{
groupReplicatesByContextMenuItem.DropDownItems
.Add(GroupByReplicateAnnotationMenuItem(replicateValue, currentGroupBy));
}
}
return iInsert;
}
public ToolStripMenuItem ReplicateOrderContextMenuItem
{
get
{
return replicateOrderContextMenuItem;
}
}
private ToolStripMenuItem GroupByReplicateAnnotationMenuItem(ReplicateValue replicateValue, string groupBy)
{
return new ToolStripMenuItem(replicateValue.Title, null, (sender, eventArgs)=>GroupByReplicateValue(replicateValue))
{
Checked = replicateValue.ToPersistedString() == groupBy
};
}
private ToolStripMenuItem OrderByReplicateAnnotationMenuItem(ReplicateValue replicateValue, string currentOrderBy)
{
return new ToolStripMenuItem(replicateValue.Title, null,
(sender, eventArgs) => OrderByReplicateAnnotation(replicateValue))
{
Checked = replicateValue.ToPersistedString() == currentOrderBy
};
}
private void removeAboveCVCutoffToolStripMenuItem_Click(object sender, EventArgs e)
{
RemoveAboveCVCutoff(ContextMenuGraphSummary);
}
public void RemoveAboveCVCutoff(GraphSummary graphSummary)
{
var pane = graphSummary.GraphPanes.First() as IAreaCVHistogramInfo;
if (pane == null ||
(graphSummary.Type != GraphTypeSummary.histogram && graphSummary.Type != GraphTypeSummary.histogram2d))
return;
var cutoff = Settings.Default.AreaCVCVCutoff / AreaGraphController.GetAreaCVFactorToDecimal();
// Create a set of everything that should remain, so that peptides excluded by
// the q value cut-off will also be removed
var ids = new HashSet<int>(pane.CurrentData.Data.Where(d => d.CV < cutoff)
.SelectMany(d => d.PeptideAnnotationPairs)
.Select(pair => pair.TransitionGroup.Id.GlobalIndex));
var nodeCount = 0;
// Remove everything not in the set
ModifyDocument(Resources.SkylineWindow_RemoveAboveCVCutoff_Remove_peptides_above_CV_cutoff, doc =>
{
var setRemove = AreaCVRefinementData.IndicesToRemove(doc, ids);
nodeCount = setRemove.Count;
return (SrmDocument)doc.RemoveAll(setRemove, null, (int) SrmDocument.Level.Molecules);
}, docPair => AuditLogEntry.CreateSimpleEntry(nodeCount == 1 ? MessageType.removed_peptide_above_cutoff : MessageType.removed_peptides_above_cutoff, docPair.OldDocumentType,
nodeCount, Settings.Default.AreaCVCVCutoff * AreaGraphController.GetAreaCVFactorToPercentage()));
}
public void SetAreaCVGroup(ReplicateValue replicateValue)
{
AreaGraphController.GroupByGroup = replicateValue?.ToPersistedString();
if (null == replicateValue)
AreaGraphController.GroupByAnnotation = null;
UpdatePeakAreaGraph();
}
public void SetAreaCVAnnotation(object annotationValue, bool update = true)
{
AreaGraphController.GroupByAnnotation = annotationValue;
if(update)
UpdatePeakAreaGraph();
}
private void areaCVtargetsToolStripMenuItem_Click(object sender, EventArgs e)
{
SetAreaCVPointsType(PointsTypePeakArea.targets);
}
private void areaCVdecoysToolStripMenuItem_Click(object sender, EventArgs e)
{
SetAreaCVPointsType(PointsTypePeakArea.decoys);
}
public void SetAreaCVPointsType(PointsTypePeakArea pointsType)
{
AreaGraphController.PointsType = pointsType;
UpdatePeakAreaGraph();
}
private void UpdateAreaPointsTypeMenuItems()
{
var pointsType = AreaGraphController.PointsType;
var shouldUseQValues = AreaGraphController.ShouldUseQValues(Document);
var decoys = Document.Settings.PeptideSettings.Integration.PeakScoringModel.UsesDecoys;
if (!decoys && pointsType == PointsTypePeakArea.decoys)
{
pointsType = AreaGraphController.PointsType = PointsTypePeakArea.targets;
}
areaCVtargetsToolStripMenuItem.Checked = pointsType == PointsTypePeakArea.targets;
areaCVtargetsToolStripMenuItem.Text = shouldUseQValues ? string.Format(Resources.SkylineWindow_UpdateAreaPointsTypeMenuItems_Targets_at__0___FDR, Settings.Default.AreaCVQValueCutoff * 100.0) : Resources.SkylineWindow_UpdateAreaPointsTypeMenuItems_Targets;
areaCVdecoysToolStripMenuItem.Visible = decoys;
areaCVdecoysToolStripMenuItem.Checked = pointsType == PointsTypePeakArea.decoys;
}
private void areaGraphMenuItem_DropDownOpening(object sender, EventArgs e)
{
var types = Settings.Default.AreaGraphTypes;
areaReplicateComparisonMenuItem.Checked = areaReplicateComparisonContextMenuItem.Checked = GraphChecked(_listGraphPeakArea, types, GraphTypeSummary.replicate);
areaPeptideComparisonMenuItem.Checked = areaPeptideComparisonContextMenuItem.Checked = GraphChecked(_listGraphPeakArea, types, GraphTypeSummary.peptide);
areaCVHistogramMenuItem.Checked = areaCVHistogramContextMenuItem.Checked = GraphChecked(_listGraphPeakArea, types, GraphTypeSummary.histogram);
areaCVHistogram2DMenuItem.Checked = areaCVHistogram2DContextMenuItem.Checked = GraphChecked(_listGraphPeakArea, types, GraphTypeSummary.histogram2d);
}
private void areaCV05binWidthToolStripMenuItem_Click(object sender, EventArgs e)
{
var factor = AreaGraphController.GetAreaCVFactorToPercentage();
SetAreaCVBinWidth(0.5 / factor);
}
private void areaCV10binWidthToolStripMenuItem_Click(object sender, EventArgs e)
{
var factor = AreaGraphController.GetAreaCVFactorToPercentage();
SetAreaCVBinWidth(1.0 / factor);
}
private void areaCV15binWidthToolStripMenuItem_Click(object sender, EventArgs e)
{
var factor = AreaGraphController.GetAreaCVFactorToPercentage();
SetAreaCVBinWidth(1.5 / factor);
}
private void areaCV20binWidthToolStripMenuItem_Click(object sender, EventArgs e)
{
var factor = AreaGraphController.GetAreaCVFactorToPercentage();
SetAreaCVBinWidth(2.0 / factor);
}
public void SetAreaCVBinWidth(double binWidth)
{
Settings.Default.AreaCVHistogramBinWidth = binWidth;
UpdatePeakAreaGraph();
}
private void areaReplicateComparisonMenuItem_Click(object sender, EventArgs e)
{
ShowPeakAreaReplicateComparison();
}
public void ShowPeakAreaReplicateComparison()
{
Settings.Default.AreaGraphTypes.Insert(0, GraphTypeSummary.replicate);
ShowGraphPeakArea(true, GraphTypeSummary.replicate);
UpdatePeakAreaGraph();
SynchronizeSummaryZooming();
}
private void areaPeptideComparisonMenuItem_Click(object sender, EventArgs e)
{
ShowPeakAreaPeptideGraph();
}
private void areaCVLogScaleToolStripMenuItem_Click(object sender, EventArgs e)
{
EnableAreaCVLogScale(!Settings.Default.AreaCVLogScale);
}
public void EnableAreaCVLogScale(bool enabled)
{
Settings.Default.AreaCVLogScale = enabled;
UpdatePeakAreaGraph();
}
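// Inserts one ratio item per internal standard label type at the top of the
// "Normalized To" menu and refreshes the check state of the fixed choices;
// the caller appends the fixed items (medians, global standards, none) afterwards.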
private void UpdateAreaNormalizationMenuItems()
{
var mods = DocumentUI.Settings.PeptideSettings.Modifications;
var standardTypes = mods.RatioInternalStandardTypes;
if (mods.HasHeavyModifications)
{
for (var i = 0; i < standardTypes.Count; i++)
{
var item = new ToolStripMenuItem(standardTypes[i].Title, null, areaCVHeavyModificationToolStripMenuItem_Click)
{
Checked = AreaGraphController.AreaCVRatioIndex == i && AreaGraphController.NormalizationMethod == AreaCVNormalizationMethod.ratio
};
areaCVNormalizedToToolStripMenuItem.DropDownItems.Insert(i, item);
}
}
areaCVMediansToolStripMenuItem.Checked = AreaGraphController.NormalizationMethod == AreaCVNormalizationMethod.medians;
areaCVGlobalStandardsToolStripMenuItem.Visible = DocumentUI.Settings.HasGlobalStandardArea;
areaCVGlobalStandardsToolStripMenuItem.Checked = AreaGraphController.NormalizationMethod == AreaCVNormalizationMethod.global_standards;
areaCVNoneToolStripMenuItem.Checked = AreaGraphController.NormalizationMethod == AreaCVNormalizationMethod.none;
}
private void areaCVHeavyModificationToolStripMenuItem_Click(object sender, EventArgs e)
{
var item = (ToolStripMenuItem) sender;
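// Ratio items are inserted in standard type order, so the item's index in the
// drop down doubles as the ratio index.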
int index = ((ToolStripMenuItem)item.OwnerItem).DropDownItems.IndexOf(item);
SetNormalizationMethod(AreaCVNormalizationMethod.ratio, index);
}
private void areaCVGlobalStandardsToolStripMenuItem_Click(object sender, EventArgs e)
{
SetNormalizationMethod(AreaCVNormalizationMethod.global_standards);
}
private void areaCVMediansToolStripMenuItem_Click(object sender, EventArgs e)
{
SetNormalizationMethod(AreaCVNormalizationMethod.medians);
}
private void areaCVNoneToolStripMenuItem_Click(object sender, EventArgs e)
{
SetNormalizationMethod(AreaCVNormalizationMethod.none);
}
public void SetNormalizationMethod(AreaCVNormalizationMethod method, int ratioIndex = -1, bool update = true)
{
AreaGraphController.NormalizationMethod = method;
AreaGraphController.AreaCVRatioIndex = ratioIndex;
if(update)
UpdatePeakAreaGraph();
}
public void ShowPeakAreaPeptideGraph()
{
Settings.Default.AreaGraphTypes.Insert(0, GraphTypeSummary.peptide);
ShowGraphPeakArea(true, GraphTypeSummary.peptide);
UpdatePeakAreaGraph();
SynchronizeSummaryZooming();
}
private void areaCVHistogramToolStripMenuItem1_Click(object sender, EventArgs e)
{
ShowPeakAreaCVHistogram();
}
public void ShowPeakAreaCVHistogram()
{
Settings.Default.AreaGraphTypes.Insert(0, GraphTypeSummary.histogram);
ShowGraphPeakArea(true, GraphTypeSummary.histogram);
UpdatePeakAreaGraph();
}
private void areaCVHistogram2DToolStripMenuItem1_Click(object sender, EventArgs e)
{
ShowPeakAreaCVHistogram2D();
}
public void ShowPeakAreaCVHistogram2D()
{
Settings.Default.AreaGraphTypes.Insert(0, GraphTypeSummary.histogram2d);
ShowGraphPeakArea(true, GraphTypeSummary.histogram2d);
UpdatePeakAreaGraph();
}
private void replicateOrderDocumentContextMenuItem_Click(object sender, EventArgs e)
{
ShowReplicateOrder(SummaryReplicateOrder.document);
}
private void replicateOrderAcqTimeContextMenuItem_Click(object sender, EventArgs e)
{
ShowReplicateOrder(SummaryReplicateOrder.time);
}
public void ShowReplicateOrder(SummaryReplicateOrder order)
{
SummaryReplicateGraphPane.ReplicateOrder = order;
SummaryReplicateGraphPane.OrderByReplicateAnnotation = null;
UpdateSummaryGraphs();
}
private void groupByReplicateContextMenuItem_Click(object sender, EventArgs e)
{
GroupByReplicateValue(null);
}
public void GroupByReplicateValue(ReplicateValue replicateValue)
{
SummaryReplicateGraphPane.GroupByReplicateAnnotation = replicateValue?.ToPersistedString();
UpdateSummaryGraphs();
}
public void GroupByReplicateAnnotation(string annotationName)
{
SummaryReplicateGraphPane.GroupByReplicateAnnotation =
DocumentAnnotations.ANNOTATION_PREFIX + annotationName;
UpdateSummaryGraphs();
}
public void OrderByReplicateAnnotation(ReplicateValue replicateValue)
{
SummaryReplicateGraphPane.OrderByReplicateAnnotation = replicateValue.ToPersistedString();
UpdateSummaryGraphs();
}
private void scopeContextMenuItem_DropDownOpening(object sender, EventArgs e)
{
var areaScope = AreaGraphController.AreaScope;
documentScopeContextMenuItem.Checked = (areaScope == AreaScope.document);
proteinScopeContextMenuItem.Checked = (areaScope == AreaScope.protein);
}
private void documentScopeContextMenuItem_Click(object sender, EventArgs e)
{
AreaScopeTo(AreaScope.document);
}
private void proteinScopeContextMenuItem_Click(object sender, EventArgs e)
{
AreaScopeTo(AreaScope.protein);
}
public void AreaScopeTo(AreaScope areaScope)
{
AreaGraphController.AreaScope = areaScope;
UpdateSummaryGraphs();
}
private void peptideOrderContextMenuItem_DropDownOpening(object sender, EventArgs e)
{
SummaryPeptideOrder peptideOrder = SummaryPeptideGraphPane.PeptideOrder;
peptideOrderDocumentContextMenuItem.Checked = (peptideOrder == SummaryPeptideOrder.document);
peptideOrderRTContextMenuItem.Checked = (peptideOrder == SummaryPeptideOrder.time);
peptideOrderAreaContextMenuItem.Checked = (peptideOrder == SummaryPeptideOrder.area);
peptideOrderMassErrorContextMenuItem.Checked = (peptideOrder == SummaryPeptideOrder.mass_error);
}
private void peptideOrderDocumentContextMenuItem_Click(object sender, EventArgs e)
{
ShowPeptideOrder(SummaryPeptideOrder.document);
}
private void peptideOrderRTContextMenuItem_Click(object sender, EventArgs e)
{
ShowPeptideOrder(SummaryPeptideOrder.time);
}
private void peptideOrderAreaContextMenuItem_Click(object sender, EventArgs e)
{
ShowPeptideOrder(SummaryPeptideOrder.area);
}
private void peptideOrderMassErrorContextMenuItem_Click(object sender, EventArgs e)
{
ShowPeptideOrder(SummaryPeptideOrder.mass_error);
}
public void ShowPeptideOrder(SummaryPeptideOrder order)
{
SummaryPeptideGraphPane.PeptideOrder = order;
UpdateSummaryGraphs();
}
public void NormalizeAreaGraphTo(AreaNormalizeToView areaView)
{
AreaGraphController.AreaView = areaView;
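// Percent and maximum normalized views are always shown on a linear scale, so
// turning them on turns the log scale off (ShowPeptideLogScale resets the
// normalization for the same reason).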
if (AreaGraphController.AreaView == AreaNormalizeToView.area_percent_view ||
AreaGraphController.AreaView == AreaNormalizeToView.area_maximum_view)
Settings.Default.AreaLogScale = false;
UpdatePeakAreaGraph();
}
private void peptideLogScaleContextMenuItem_Click(object sender, EventArgs e)
{
ShowPeptideLogScale(peptideLogScaleContextMenuItem.Checked);
}
public void ShowPeptideLogScale(bool isChecked)
{
Settings.Default.AreaLogScale = isChecked;
if (isChecked)
AreaGraphController.AreaView = AreaNormalizeToView.none;
UpdateSummaryGraphs();
}
private void peptideCvsContextMenuItem_Click(object sender, EventArgs e)
{
ShowCVValues(peptideCvsContextMenuItem.Checked);
}
public void ShowCVValues(bool isChecked)
{
Settings.Default.ShowPeptideCV = isChecked;
// Showing CVs only makes sense for Replicates = All
Settings.Default.ShowRegressionReplicateEnum = ReplicateDisplay.all.ToString();
// Showing CVs does not make sense for All retention time values at once
// But this is confusing now, with replicate annotation grouping
// if (RTPeptideGraphPane.RTValue == RTPeptideValue.All)
// Settings.Default.RTPeptideValue = RTPeptideValue.Retention.ToString();
UpdateSummaryGraphs();
}
private void areaPropsContextMenuItem_Click(object sender, EventArgs e)
{
switch (ContextMenuGraphSummary.Type)
{
case GraphTypeSummary.replicate:
case GraphTypeSummary.peptide:
ShowAreaPropertyDlg();
break;
case GraphTypeSummary.histogram:
case GraphTypeSummary.histogram2d:
ShowAreaCVPropertyDlg(ContextMenuGraphSummary);
break;
}
}
public void ShowAreaCVPropertyDlg(GraphSummary graphSummary)
{
using (var dlgProperties = new AreaCVToolbarProperties(graphSummary))
{
if (dlgProperties.ShowDialog(this) == DialogResult.OK)
UpdatePeakAreaGraph();
}
}
public void ShowAreaPropertyDlg()
{
using (var dlg = new AreaChartPropertyDlg())
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
UpdateSummaryGraphs();
}
}
}
private void areaNormalizeContextMenuItem_DropDownOpening(object sender, EventArgs e)
{
ToolStripMenuItem menu = areaNormalizeContextMenuItem;
// Remove menu items up to the "Global Standards" menu item.
while (!ReferenceEquals(areaNormalizeGlobalContextMenuItem, menu.DropDownItems[0]))
menu.DropDownItems.RemoveAt(0);
var areaView = AreaGraphController.AreaView;
var settings = DocumentUI.Settings;
var mods = settings.PeptideSettings.Modifications;
var standardTypes = mods.RatioInternalStandardTypes;
// Add the Heavy option to the areaNormalizeContextMenuItem if there are heavy modifications
if (mods.HasHeavyModifications)
{
for (int i = 0; i < standardTypes.Count; i++)
{
var handler = new SelectNormalizeHandler(this, i);
var item = new ToolStripMenuItem(standardTypes[i].Title, null, handler.ToolStripMenuItemClick)
{
Checked = (SequenceTree.RatioIndex == i &&
areaView == AreaNormalizeToView.area_ratio_view)
};
menu.DropDownItems.Insert(i, item);
}
}
bool globalStandard = settings.HasGlobalStandardArea;
areaNormalizeGlobalContextMenuItem.Visible = globalStandard;
areaNormalizeGlobalContextMenuItem.Checked = (areaView == AreaNormalizeToView.area_global_standard_view);
if (!globalStandard && areaView == AreaNormalizeToView.area_global_standard_view)
areaView = AreaNormalizeToView.none;
areaNormalizeTotalContextMenuItem.Checked = (areaView == AreaNormalizeToView.area_percent_view);
areaNormalizeMaximumContextMenuItem.Checked = (areaView == AreaNormalizeToView.area_maximum_view);
areaNormalizeNoneContextMenuItem.Checked = (areaView == AreaNormalizeToView.none);
}
private class SelectNormalizeHandler : SelectRatioHandler
{
public SelectNormalizeHandler(SkylineWindow skyline, int ratioIndex) : base(skyline, ratioIndex)
{
}
protected override void OnMenuItemClick()
{
AreaGraphController.AreaView = AreaNormalizeToView.area_ratio_view;
base.OnMenuItemClick();
_skyline.UpdatePeakAreaGraph();
}
}
public void SetNormalizeIndex(int index)
{
new SelectNormalizeHandler(this, index).Select();
}
private void areaNormalizeGlobalContextMenuItem_Click(object sender, EventArgs e)
{
NormalizeAreaGraphTo(AreaNormalizeToView.area_global_standard_view);
}
private void areaNormalizeTotalContextMenuItem_Click(object sender, EventArgs e)
{
NormalizeAreaGraphTo(AreaNormalizeToView.area_percent_view);
}
private void areaNormalizeNoneContextMenuItem_Click(object sender, EventArgs e)
{
NormalizeAreaGraphTo(AreaNormalizeToView.none);
}
private void areaNormalizeMaximumContextMenuItem_Click(object sender, EventArgs e)
{
NormalizeAreaGraphTo(AreaNormalizeToView.area_maximum_view);
}
private void showLibraryPeakAreaContextMenuItem_Click(object sender, EventArgs e)
{
// Show/hide the library column in the peak area view.
Settings.Default.ShowLibraryPeakArea = !Settings.Default.ShowLibraryPeakArea;
UpdateSummaryGraphs();
}
private void showDotProductToolStripMenuItem_Click(object sender, EventArgs e)
{
Settings.Default.ShowDotProductPeakArea = !Settings.Default.ShowDotProductPeakArea;
UpdateSummaryGraphs();
}
private void showPeakAreaLegendContextMenuItem_Click(object sender, EventArgs e)
{
ShowPeakAreaLegend(!Settings.Default.ShowPeakAreaLegend);
}
public void ShowPeakAreaLegend(bool show)
{
Settings.Default.ShowPeakAreaLegend = show;
UpdateSummaryGraphs();
}
public void UpdatePeakAreaGraph()
{
_listGraphPeakArea.ForEach(g => g.UpdateUI());
}
private void UpdateSummaryGraphs()
{
UpdateRetentionTimeGraph();
UpdatePeakAreaGraph();
UpdateMassErrorGraph();
UpdateDetectionsGraph();
}
#endregion
#region Mass error graph
public GraphSummary GraphMassError { get { return _listGraphMassError.FirstOrDefault(); } }
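// Same snapshot/restore pattern as UpdateUIGraphPeakArea: hiding the graphs strips
// their types from Settings.Default.MassErrorGraphTypes, so the list is saved first.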
public void UpdateUIGraphMassError(bool visible)
{
var list = Settings.Default.MassErrorGraphTypes.ToArray();
ShowGraphMassError(visible);
if (!visible)
{
Settings.Default.MassErrorGraphTypes.Clear();
Settings.Default.MassErrorGraphTypes.AddRange(list);
}
}
public void ShowGraphMassError(bool show)
{
Settings.Default.MassErrorGraphTypes.ToList().ForEach(t => ShowGraphMassError(show, t));
}
public void ShowGraphMassError(bool show, GraphTypeSummary type)
{
ShowGraph(_listGraphMassError, show, type, CreateGraphMassError);
}
private GraphSummary CreateGraphMassError(GraphTypeSummary type)
{
if (type == GraphTypeSummary.invalid)
return null;
var graph = new GraphSummary(type, this, new MassErrorGraphController(), SelectedResultsIndex);
graph.FormClosed += graphMassError_FormClosed;
graph.VisibleChanged += graphMassError_VisibleChanged;
graph.GraphControl.ZoomEvent += GraphControl_ZoomEvent;
_listGraphMassError.Insert(0, graph);
return graph;
}
private void DestroyGraphMassError(GraphSummary graph)
{
graph.FormClosed -= graphMassError_FormClosed;
graph.VisibleChanged -= graphMassError_VisibleChanged;
graph.HideOnClose = false;
graph.Close();
_listGraphMassError.Remove(graph);
Settings.Default.MassErrorGraphTypes.Remove(graph.Type);
}
private void graphMassError_VisibleChanged(object sender, EventArgs e)
{
var graph = (GraphSummary)sender;
if (graph.Visible)
{
Settings.Default.MassErrorGraphTypes.Insert(0, graph.Type);
_listGraphMassError.Remove(graph);
_listGraphMassError.Insert(0, graph);
}
else if (graph.IsHidden)
{
Settings.Default.MassErrorGraphTypes.Remove(graph.Type);
}
}
private void graphMassError_FormClosed(object sender, FormClosedEventArgs e)
{
GraphSummary graph = (GraphSummary) sender;
_listGraphMassError.Remove(graph);
Settings.Default.MassErrorGraphTypes.Remove(graph.Type);
}
private void massErrorReplicateComparisonMenuItem_Click(object sender, EventArgs e)
{
ShowMassErrorReplicateComparison();
}
public void ShowMassErrorReplicateComparison()
{
Settings.Default.MassErrorGraphTypes.Insert(0, GraphTypeSummary.replicate);
ShowGraphMassError(true, GraphTypeSummary.replicate);
UpdateMassErrorGraph();
SynchronizeSummaryZooming();
}
private void massErrorPeptideComparisonMenuItem_Click(object sender, EventArgs e)
{
ShowMassErrorPeptideGraph();
}
public void ShowMassErrorPeptideGraph()
{
Settings.Default.MassErrorGraphTypes.Insert(0, GraphTypeSummary.peptide);
ShowGraphMassError(true, GraphTypeSummary.peptide);
UpdateMassErrorGraph();
SynchronizeSummaryZooming();
}
private void massErrorHistogramMenuItem_Click(object sender, EventArgs e)
{
ShowMassErrorHistogramGraph();
}
public void ShowMassErrorHistogramGraph()
{
Settings.Default.MassErrorGraphTypes.Insert(0, GraphTypeSummary.histogram);
ShowGraphMassError(true, GraphTypeSummary.histogram);
UpdateMassErrorGraph();
}
private void massErrorHistogram2DMenuItem_Click(object sender, EventArgs e)
{
ShowMassErrorHistogramGraph2D();
}
public void ShowMassErrorHistogramGraph2D()
{
Settings.Default.MassErrorGraphTypes.Insert(0, GraphTypeSummary.histogram2d);
ShowGraphMassError(true, GraphTypeSummary.histogram2d);
UpdateMassErrorGraph();
}
public void UpdateMassErrorGraph()
{
_listGraphMassError.ForEach(g => g.UpdateUI());
}
private void massErrorMenuItem_DropDownOpening(object sender, EventArgs e)
{
var types = Settings.Default.MassErrorGraphTypes;
massErrorReplicateComparisonContextMenuItem.Checked = massErrorReplicateComparisonMenuItem.Checked =
GraphChecked(_listGraphMassError, types, GraphTypeSummary.replicate);
massErrorPeptideComparisonContextMenuItem.Checked = massErrorPeptideComparisonMenuItem.Checked =
GraphChecked(_listGraphMassError, types, GraphTypeSummary.peptide);
massErrorHistogramContextMenuItem.Checked = massErrorHistogramMenuItem.Checked =
GraphChecked(_listGraphMassError, types, GraphTypeSummary.histogram);
massErrorHistogram2DContextMenuItem.Checked = massErrorHistogram2DMenuItem.Checked =
GraphChecked(_listGraphMassError, types, GraphTypeSummary.histogram2d);
}
private void BuildMassErrorGraphMenu(GraphSummary graph, ToolStrip menuStrip)
{
// Store the original menu items in an array, and insert a separator
ToolStripItem[] items = new ToolStripItem[menuStrip.Items.Count];
int iUnzoom = -1;
for (int i = 0; i < items.Length; i++)
{
items[i] = menuStrip.Items[i];
string tag = (string)items[i].Tag;
if (tag == @"unzoom")
iUnzoom = i;
}
if (iUnzoom != -1)
menuStrip.Items.Insert(iUnzoom, toolStripSeparator25); // TODO: Use another separator?
// Insert Skyline-specific menus
var set = Settings.Default;
int iInsert = 0;
var graphType = graph.Type;
menuStrip.Items.Insert(iInsert++, massErrorGraphContextMenuItem);
if (massErrorGraphContextMenuItem.DropDownItems.Count == 0)
{
massErrorGraphContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
massErrorReplicateComparisonContextMenuItem,
massErrorPeptideComparisonContextMenuItem,
massErrorHistogramContextMenuItem,
massErrorHistogram2DContextMenuItem
});
}
menuStrip.Items.Insert(iInsert++, toolStripSeparator16);
if (graphType == GraphTypeSummary.peptide ||
graphType == GraphTypeSummary.replicate)
{
AddTransitionContextMenu(menuStrip, iInsert++);
}
if (graphType == GraphTypeSummary.replicate)
{
iInsert = AddReplicateOrderAndGroupByMenuItems(menuStrip, iInsert);
var massErrorReplicateGraphPane = graph.GraphPanes.FirstOrDefault() as MassErrorReplicateGraphPane;
if (massErrorReplicateGraphPane != null)
{
// If the mass error graph is being displayed and it shows a legend,
// display the "Legend" option
if (massErrorReplicateGraphPane.CanShowMassErrorLegend)
{
showMassErrorLegendContextMenuItem.Checked = set.ShowMassErrorLegend; // TODO: Mass error legend
menuStrip.Items.Insert(iInsert++, showMassErrorLegendContextMenuItem);
}
}
}
else if (graphType == GraphTypeSummary.peptide)
{
AddPeptideOrderContextMenu(menuStrip, iInsert++);
iInsert = AddReplicatesContextMenu(menuStrip, iInsert);
AddScopeContextMenu(menuStrip, iInsert++);
}
else if (graphType == GraphTypeSummary.histogram || graphType == GraphTypeSummary.histogram2d)
{
iInsert = AddReplicatesContextMenu(menuStrip, iInsert);
iInsert = AddPointsContextMenu(menuStrip, iInsert);
massErrorTargetsContextMenuItem.Checked = MassErrorGraphController.PointsType == PointsTypeMassError.targets;
massErrorDecoysContextMenuItem.Checked = MassErrorGraphController.PointsType == PointsTypeMassError.decoys;
bool trained = DocumentUI.Settings.PeptideSettings.Integration.PeakScoringModel.IsTrained;
massErrorTargets1FDRContextMenuItem.Visible = trained;
massErrorTargets1FDRContextMenuItem.Checked = MassErrorGraphController.PointsType == PointsTypeMassError.targets_1FDR;
if (!trained && massErrorTargets1FDRContextMenuItem.Checked)
{
massErrorTargetsContextMenuItem.Checked = true;
}
iInsert = AddBinCountContextMenu(menuStrip, iInsert);
iInsert = AddTransitionsMassErrorContextMenu(menuStrip, iInsert);
}
if (graphType == GraphTypeSummary.histogram2d)
{
iInsert = AddXAxisContextMenu(menuStrip, iInsert);
menuStrip.Items.Insert(iInsert++, massErrorlogScaleContextMenuItem);
massErrorlogScaleContextMenuItem.Checked = Settings.Default.MassErrorHistogram2DLogScale;
}
if (graphType == GraphTypeSummary.peptide || (null != Settings.Default.GroupByReplicateAnnotation && graphType == GraphTypeSummary.replicate))
{
menuStrip.Items.Insert(iInsert++, peptideCvsContextMenuItem);
peptideCvsContextMenuItem.Checked = set.ShowPeptideCV;
}
if (graphType == GraphTypeSummary.peptide ||
graphType == GraphTypeSummary.replicate)
{
selectionContextMenuItem.Checked = set.ShowReplicateSelection;
menuStrip.Items.Insert(iInsert++, selectionContextMenuItem);
synchronizeSummaryZoomingContextMenuItem.Checked = set.SynchronizeSummaryZooming;
menuStrip.Items.Insert(iInsert++, synchronizeSummaryZoomingContextMenuItem);
}
menuStrip.Items.Insert(iInsert++, toolStripSeparator24);
menuStrip.Items.Insert(iInsert++, massErrorPropsContextMenuItem);
menuStrip.Items.Insert(iInsert, toolStripSeparator28);
// Remove some ZedGraph menu items not of interest
foreach (var item in items)
{
string tag = (string)item.Tag;
if (tag == @"set_default" || tag == @"show_val")
menuStrip.Items.Remove(item);
}
}
private int AddPointsContextMenu(ToolStrip menuStrip, int iInsert)
{
menuStrip.Items.Insert(iInsert++, massErrorPointsContextMenuItem);
if (massErrorPointsContextMenuItem.DropDownItems.Count == 0)
{
massErrorPointsContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
massErrorTargetsContextMenuItem,
massErrorTargets1FDRContextMenuItem,
massErrorDecoysContextMenuItem
});
}
return iInsert;
}
private int AddBinCountContextMenu(ToolStrip menuStrip, int iInsert)
{
menuStrip.Items.Insert(iInsert++, binCountContextMenuItem);
if (binCountContextMenuItem.DropDownItems.Count == 0)
{
binCountContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
ppm05ContextMenuItem,
ppm10ContextMenuItem,
ppm15ContextMenuItem,
ppm20ContextMenuItem
});
}
return iInsert;
}
private int AddTransitionsMassErrorContextMenu(ToolStrip menuStrip, int iInsert)
{
menuStrip.Items.Insert(iInsert++, massErrorTransitionsContextMenuItem);
if (massErrorTransitionsContextMenuItem.DropDownItems.Count == 0)
{
massErrorTransitionsContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
massErrorAllTransitionsContextMenuItem,
massErrorBestTransitionsContextMenuItem,
toolStripSeparator55,
MassErrorPrecursorsContextMenuItem,
MassErrorProductsContextMenuItem
});
}
return iInsert;
}
private int AddXAxisContextMenu(ToolStrip menuStrip, int iInsert)
{
menuStrip.Items.Insert(iInsert++, massErrorXAxisContextMenuItem);
if (massErrorXAxisContextMenuItem.DropDownItems.Count == 0)
{
massErrorXAxisContextMenuItem.DropDownItems.AddRange(new ToolStripItem[]
{
massErorrRetentionTimeContextMenuItem,
massErrorMassToChargContextMenuItem
});
}
return iInsert;
}
private void massErrorTransitionsContextMenuItem_DropDownOpening(object sender, EventArgs e)
{
massErrorAllTransitionsContextMenuItem.Checked = MassErrorGraphController.HistogramTransiton == TransitionMassError.all;
massErrorBestTransitionsContextMenuItem.Checked = MassErrorGraphController.HistogramTransiton == TransitionMassError.best;
MassErrorPrecursorsContextMenuItem.Checked = MassErrorGraphController.HistogramDisplayType == DisplayTypeMassError.precursors;
MassErrorProductsContextMenuItem.Checked = MassErrorGraphController.HistogramDisplayType == DisplayTypeMassError.products;
}
private void massErrorAllTransitionsContextMenuItem_Click(object sender, EventArgs e)
{
ChangeMassErrorTransition(TransitionMassError.all);
}
private void massErrorBestTransitionsContextMenuItem_Click(object sender, EventArgs e)
{
ChangeMassErrorTransition(TransitionMassError.best);
}
public void ChangeMassErrorTransition(TransitionMassError transitionMassError)
{
MassErrorGraphController.HistogramTransiton = transitionMassError;
UpdateMassErrorGraph();
}
private void MassErrorPrecursorsContextMenuItem_Click(object sender, EventArgs e)
{
ChangeMassErrorDisplayType(DisplayTypeMassError.precursors);
}
private void MassErrorProductsContextMenuItem_Click(object sender, EventArgs e)
{
ChangeMassErrorDisplayType(DisplayTypeMassError.products);
}
public void ChangeMassErrorDisplayType(DisplayTypeMassError displayType)
{
MassErrorGraphController.HistogramDisplayType = displayType;
UpdateMassErrorGraph();
}
private void massErrorXAxisContextMenuItem_DropDownOpening(object sender, EventArgs e)
{
massErrorMassToChargContextMenuItem.Checked = MassErrorGraphController.Histogram2DXAxis == Histogram2DXAxis.mass_to_charge;
massErorrRetentionTimeContextMenuItem.Checked = MassErrorGraphController.Histogram2DXAxis == Histogram2DXAxis.retention_time;
}
private void massErorrRetentionTimeContextMenuItem_Click(object sender, EventArgs e)
{
UpdateXAxis(Histogram2DXAxis.retention_time);
}
private void massErrorMassToChargContextMenuItem_Click(object sender, EventArgs e)
{
UpdateXAxis(Histogram2DXAxis.mass_to_charge);
}
public void UpdateXAxis(Histogram2DXAxis Xaxis)
{
MassErrorGraphController.Histogram2DXAxis = Xaxis;
UpdateMassErrorGraph();
}
private void showMassErrorLegendContextMenuItem_Click(object sender, EventArgs e)
{
ShowMassErrorLegend(!Settings.Default.ShowMassErrorLegend);
}
public void ShowMassErrorLegend(bool show)
{
Settings.Default.ShowMassErrorLegend = show;
UpdateSummaryGraphs();
}
private void massErrorlogScaleContextMenuItem_Click(object sender, EventArgs e)
{
SwitchLogScale();
}
public void SwitchLogScale()
{
Settings.Default.MassErrorHistogram2DLogScale = !Settings.Default.MassErrorHistogram2DLogScale;
UpdateMassErrorGraph();
}
private void binCountContextMenuItem_DropDownOpening(object sender, EventArgs e)
{
UpdatePpmMenuItem(ppm05ContextMenuItem, 0.5);
UpdatePpmMenuItem(ppm10ContextMenuItem, 1.0);
UpdatePpmMenuItem(ppm15ContextMenuItem, 1.5);
UpdatePpmMenuItem(ppm20ContextMenuItem, 2.0);
}
private void UpdatePpmMenuItem(ToolStripMenuItem toolStripMenuItem, double ppm)
{
toolStripMenuItem.Checked = Settings.Default.MassErorrHistogramBinSize == ppm;
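// "F01" is a fixed-point format with one decimal place, so 0.5 renders
// as "0.5 ppm" and 2.0 as "2.0 ppm" (illustrative values).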
toolStripMenuItem.Text = string.Format(@"{0:F01} ppm", ppm);
}
private void ppm05ContextMenuItem_Click(object sender, EventArgs e)
{
UpdateBinSize(0.5);
}
private void ppm10ContextMenuItem_Click(object sender, EventArgs e)
{
UpdateBinSize(1);
}
private void ppm15ContextMenuItem_Click(object sender, EventArgs e)
{
UpdateBinSize(1.5);
}
private void ppm20ContextMenuItem_Click(object sender, EventArgs e)
{
UpdateBinSize(2);
}
public void UpdateBinSize(double bin)
{
Settings.Default.MassErorrHistogramBinSize = bin;
UpdateMassErrorGraph();
}
private void massErrorTargetsContextMenuItem_Click(object sender, EventArgs e)
{
ShowPointsTypeMassError(PointsTypeMassError.targets);
}
private void massErrorDecoysContextMenuItem_Click(object sender, EventArgs e)
{
ShowPointsTypeMassError(PointsTypeMassError.decoys);
}
private void massErrorTargets1FDRContextMenuItem_Click(object sender, EventArgs e)
{
ShowPointsTypeMassError(PointsTypeMassError.targets_1FDR);
}
public void ShowPointsTypeMassError(PointsTypeMassError pointsTypeMassError)
{
MassErrorGraphController.PointsType = pointsTypeMassError;
UpdateMassErrorGraph();
}
private void massErrorPropsContextMenuItem_Click(object sender, EventArgs e)
{
ShowMassErrorPropertyDlg();
}
public void ShowMassErrorPropertyDlg()
{
using (var dlg = new MassErrorChartPropertyDlg())
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
UpdateSummaryGraphs();
}
}
}
#endregion
#region Detections Graph
private void detectionsPlotsMenuItem_Click(object sender, EventArgs e)
{
var types = Settings.Default.DetectionGraphTypes;
detectionsReplicateComparisonMenuItem.Checked = GraphChecked(_listGraphDetections, types, GraphTypeSummary.detections);
detectionsHistogramMenuItem.Checked = GraphChecked(_listGraphDetections, types, GraphTypeSummary.detections_histogram);
}
private void graphDetections_DropDownOpening(object sender, EventArgs e)
{
var types = Settings.Default.DetectionGraphTypes;
detectionsReplicateComparisonMenuItem.Checked = GraphChecked(_listGraphDetections, types, GraphTypeSummary.detections);
detectionsHistogramMenuItem.Checked = GraphChecked(_listGraphDetections, types, GraphTypeSummary.detections_histogram);
}
private void detectionsReplicateComparisonMenuItem_Click(object sender, EventArgs e)
{
ShowDetectionsReplicateComparisonGraph();
}
private void detectionsHistogramMenuItem_Click(object sender, EventArgs e)
{
ShowDetectionsHistogramGraph();
}
public void UpdateUIGraphDetection(bool visible)
{
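// Hiding the graphs triggers graphDetections_VisibleChanged, which removes
// each graph's type from Settings.Default.DetectionGraphTypes, so take a
// snapshot of the list first and restore it below to preserve the selection.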
var list = Settings.Default.DetectionGraphTypes.ToArray();
ShowGraphDetection(visible);
if (!visible)
{
Settings.Default.DetectionGraphTypes.Clear();
Settings.Default.DetectionGraphTypes.AddRange(list);
}
}
public void ShowDetectionsReplicateComparisonGraph()
{
Settings.Default.DetectionGraphTypes.Insert(0, GraphTypeSummary.detections);
ShowGraphDetection(true, GraphTypeSummary.detections);
UpdateDetectionsGraph();
}
public void ShowDetectionsHistogramGraph()
{
Settings.Default.DetectionGraphTypes.Insert(0, GraphTypeSummary.detections_histogram);
ShowGraphDetection(true, GraphTypeSummary.detections_histogram);
UpdateDetectionsGraph();
}
public void ShowGraphDetection(bool show)
{
Settings.Default.DetectionGraphTypes.ToList().ForEach(t => ShowGraphDetection(show, t));
}
public void ShowGraphDetection(bool show, GraphTypeSummary type)
{
ShowGraph(_listGraphDetections, show, type, CreateGraphDetections);
}
private GraphSummary CreateGraphDetections(GraphTypeSummary type)
{
if (type == GraphTypeSummary.invalid)
return null;
GraphSummary graph = new GraphSummary(type, this, new DetectionsGraphController(), SelectedResultsIndex);
graph.FormClosed += graphDetections_FormClosed;
graph.VisibleChanged += graphDetections_VisibleChanged;
graph.GraphControl.ZoomEvent += GraphControl_ZoomEvent;
graph.Toolbar = new DetectionsToolbar(graph);
_listGraphDetections.Insert(0, graph);
return graph;
}
private void DestroyGraphDetections(GraphSummary graph)
{
graph.FormClosed -= graphDetections_FormClosed;
graph.VisibleChanged -= graphDetections_VisibleChanged;
graph.HideOnClose = false;
graph.Close();
_listGraphDetections.Remove(graph);
Settings.Default.DetectionGraphTypes.Remove(graph.Type);
}
private void graphDetections_VisibleChanged(object sender, EventArgs e)
{
var graph = (GraphSummary)sender;
if (graph.Visible)
{
Settings.Default.DetectionGraphTypes.Insert(0, graph.Type);
_listGraphDetections.Remove(graph);
_listGraphDetections.Insert(0, graph);
}
else if (graph.IsHidden)
{
Settings.Default.DetectionGraphTypes.Remove(graph.Type);
}
}
private void graphDetections_FormClosed(object sender, FormClosedEventArgs e)
{
GraphSummary graph = (GraphSummary)sender;
_listGraphDetections.Remove(graph);
Settings.Default.DetectionGraphTypes.Remove(graph.Type);
}
public void UpdateDetectionsGraph()
{
_listGraphDetections.ForEach(g => g.UpdateUI());
}
public GraphSummary DetectionsPlot { get { return _listGraphDetections.FirstOrDefault(); } }
private void BuildDetectionsGraphMenu(GraphSummary graph, ToolStrip menuStrip)
{
// Store original menu items in an array, and insert a separator
ToolStripItem[] items = new ToolStripItem[menuStrip.Items.Count];
int iUnzoom = -1;
for (int i = 0; i < items.Length; i++)
{
items[i] = menuStrip.Items[i];
string tag = (string)items[i].Tag;
if (tag == @"unzoom")
iUnzoom = i;
}
if (iUnzoom != -1)
menuStrip.Items.Insert(iUnzoom, detectionsToolStripSeparator1);
// Insert skyline specific menus
int iInsert = 0;
var graphType = graph.Type;
menuStrip.Items.Insert(iInsert++, detectionsGraphTypeToolStripMenuItem);
menuStrip.Items.Insert(iInsert++, detectionsTargetToolStripMenuItem);
menuStrip.Items.Insert(iInsert++, detectionsToolStripSeparator2);
if (graphType == GraphTypeSummary.detections)
menuStrip.Items.Insert(iInsert++, detectionsShowToolStripMenuItem);
menuStrip.Items.Insert(iInsert++, detectionsYScaleToolStripMenuItem);
menuStrip.Items.Insert(iInsert++, detectionsPropertiesToolStripMenuItem);
detectionsPropertiesToolStripMenuItem.Tag = graph;
menuStrip.Items.Insert(iInsert++, detectionsToolStripSeparator3);
// Remove some ZedGraph menu items not of interest
foreach (var item in items)
{
string tag = (string)item.Tag;
if (tag == @"set_default" || tag == @"show_val")
menuStrip.Items.Remove(item);
}
//Update menu according to the current settings
detectionsShowMeanToolStripMenuItem.Checked = DetectionsGraphController.Settings.ShowMean;
detectionsShowSelectionToolStripMenuItem.Checked = DetectionsGraphController.Settings.ShowSelection;
detectionsShowLegendToolStripMenuItem.Checked = DetectionsGraphController.Settings.ShowLegend;
detectionsShowAtLeastNToolStripMenuItem.Checked = DetectionsGraphController.Settings.ShowAtLeastN;
foreach (var item in new[]
{
detectionsYScaleOneToolStripMenuItem,
detectionsYScalePercentToolStripMenuItem
})
{
item.Checked = ((int) item.Tag) == DetectionsGraphController.Settings.YScaleFactor.Value;
item.Text = DetectionsGraphController.YScaleFactorType.GetValues()
.First((e) => ((int) item.Tag) == e.Value).ToString();
}
foreach (var item in new[]
{
detectionsTargetPrecursorToolStripMenuItem,
detectionsTargetPeptideToolStripMenuItem
})
item.Checked = ((int)item.Tag) == DetectionsGraphController.Settings.TargetType.Value;
}
private void detectionsPropertiesToolStripMenuItem_Click(object sender, EventArgs e)
{
if (sender is ToolStripMenuItem item)
{
if (item.Tag is GraphSummary graph)
ShowDetectionsPropertyDlg(graph);
}
}
public void ShowDetectionsPropertyDlg(GraphSummary graph)
{
using (var dlg = new DetectionToolbarProperties(graph))
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
UpdateSummaryGraphs();
}
}
}
private void detectionsYScaleOneToolStripMenuItem_Click(object sender, EventArgs e)
{
DetectionsGraphController.Settings.YScaleFactor = DetectionsGraphController.YScaleFactorType.ONE;
UpdateDetectionsGraph();
}
private void detectionsYScalePercentToolStripMenuItem_Click(object sender, EventArgs e)
{
DetectionsGraphController.Settings.YScaleFactor = DetectionsGraphController.YScaleFactorType.PERCENT;
UpdateDetectionsGraph();
}
private void detectionsShowSelectionToolStripMenuItem_Click(object sender, EventArgs e)
{
DetectionsGraphController.Settings.ShowSelection = !DetectionsGraphController.Settings.ShowSelection;
UpdateDetectionsGraph();
}
private void detectionsShowLegendToolStripMenuItem_Click(object sender, EventArgs e)
{
DetectionsGraphController.Settings.ShowLegend = !DetectionsGraphController.Settings.ShowLegend;
UpdateDetectionsGraph();
}
private void detectionsShowMeanToolStripMenuItem_Click(object sender, EventArgs e)
{
DetectionsGraphController.Settings.ShowMean = !DetectionsGraphController.Settings.ShowMean;
UpdateDetectionsGraph();
}
private void detectionsShowAtLeastNToolStripMenuItem_Click(object sender, EventArgs e)
{
DetectionsGraphController.Settings.ShowAtLeastN = !DetectionsGraphController.Settings.ShowAtLeastN;
UpdateDetectionsGraph();
}
private void detectionsTargetPrecursorToolStripMenuItem_Click(object sender, EventArgs e)
{
DetectionsGraphController.Settings.TargetType = DetectionsGraphController.TargetType.PRECURSOR;
UpdateDetectionsGraph();
}
private void detectionsTargetPeptideToolStripMenuItem_Click(object sender, EventArgs e)
{
DetectionsGraphController.Settings.TargetType = DetectionsGraphController.TargetType.PEPTIDE;
detectionsTargetPrecursorToolStripMenuItem.Checked = false;
detectionsTargetPeptideToolStripMenuItem.Checked = true;
UpdateDetectionsGraph();
}
private void detectionsGraphTypeReplicateToolStripMenuItem_Click(object sender, EventArgs e)
{
ShowDetectionsReplicateComparisonGraph();
}
private void detectionsGraphTypeHistogramToolStripMenuItem_Click(object sender, EventArgs e)
{
ShowDetectionsHistogramGraph();
}
#endregion
#region Results Grid
private void resultsGridMenuItem_Click(object sender, EventArgs e)
{
ShowResultsGrid(Settings.Default.ShowResultsGrid = true);
}
public void ShowResultsGrid(bool show)
{
if (show)
{
if (_resultsGridForm != null && !Program.SkylineOffscreen)
{
_resultsGridForm.Activate();
}
else
{
_resultsGridForm = _resultsGridForm ?? CreateResultsGrid();
var rectFloat = GetFloatingRectangleForNewWindow();
_resultsGridForm.Show(dockPanel, rectFloat);
}
}
else
{
if (_resultsGridForm != null)
{
_resultsGridForm.Hide();
}
}
}
private DockableForm CreateResultsGrid()
{
Debug.Assert(null == _resultsGridForm);
_resultsGridForm = new LiveResultsGrid(this);
_resultsGridForm.FormClosed += resultsGrid_FormClosed;
_resultsGridForm.VisibleChanged += resultsGrid_VisibleChanged;
return _resultsGridForm;
}
private void DestroyResultsGrid()
{
if (_resultsGridForm != null)
{
_resultsGridForm.FormClosed -= resultsGrid_FormClosed;
_resultsGridForm.VisibleChanged -= resultsGrid_VisibleChanged;
_resultsGridForm.HideOnClose = false;
_resultsGridForm.Close();
_resultsGridForm = null;
}
}
private void resultsGrid_VisibleChanged(object sender, EventArgs e)
{
Settings.Default.ShowResultsGrid = (_resultsGridForm != null && _resultsGridForm.Visible);
}
void resultsGrid_FormClosed(object sender, FormClosedEventArgs e)
{
// Update settings and menu check
Settings.Default.ShowResultsGrid = false;
_resultsGridForm = null;
}
#endregion
#region Document Grid
private void documentGridMenuItem_Click(object sender, EventArgs e)
{
ShowDocumentGrid(true);
}
public void ShowDocumentGrid(bool show)
{
if (show)
{
if (_documentGridForm != null && !Program.SkylineOffscreen)
{
_documentGridForm.Activate();
}
else
{
_documentGridForm = _documentGridForm ?? CreateDocumentGrid();
if (_documentGridForm != null)
{
var rectFloat = GetFloatingRectangleForNewWindow();
_documentGridForm.Show(dockPanel, rectFloat);
}
}
}
else
{
if (_documentGridForm != null)
{
_documentGridForm.Close();
}
}
}
private DocumentGridForm CreateDocumentGrid()
{
Assume.IsNull(_documentGridForm);
_documentGridForm = new DocumentGridForm(this);
_documentGridForm.FormClosed += documentGrid_FormClosed;
if (!string.IsNullOrEmpty(Settings.Default.DocumentGridView))
{
var viewName = ViewName.Parse(Settings.Default.DocumentGridView);
if (viewName.HasValue)
{
_documentGridForm.DataboundGridControl.ChooseView(viewName.Value);
}
}
return _documentGridForm;
}
private void DestroyDocumentGrid()
{
if (null != _documentGridForm)
{
_documentGridForm.FormClosed -= documentGrid_FormClosed;
_documentGridForm.Close();
_documentGridForm = null;
}
}
void documentGrid_FormClosed(object sender, FormClosedEventArgs e)
{
_documentGridForm = null;
}
#endregion
#region Calibration Curves
private void calibrationCurvesMenuItem_Click(object sender, EventArgs e)
{
ShowCalibrationForm();
}
private CalibrationForm CreateCalibrationForm()
{
Assume.IsNull(_calibrationForm);
_calibrationForm = new CalibrationForm(this);
_calibrationForm.FormClosed += calibrationForm_FormClosed;
return _calibrationForm;
}
void calibrationForm_FormClosed(object sender, FormClosedEventArgs e)
{
_calibrationForm = null;
}
private void DestroyCalibrationForm()
{
if (null != _calibrationForm)
{
_calibrationForm.FormClosed -= calibrationForm_FormClosed;
_calibrationForm.Close();
_calibrationForm = null;
}
}
public CalibrationForm ShowCalibrationForm()
{
if (null != _calibrationForm)
{
_calibrationForm.Activate();
}
else
{
var rectFloat = GetFloatingRectangleForNewWindow();
CreateCalibrationForm().Show(dockPanel, rectFloat);
}
return _calibrationForm;
}
#endregion
#region Audit Log
public void ShowAuditLog()
{
if (_auditLogForm != null && !Program.SkylineOffscreen)
{
_auditLogForm.Activate();
}
else
{
_auditLogForm = _auditLogForm ?? CreateAuditLogForm();
if (_auditLogForm != null)
{
var rectFloat = GetFloatingRectangleForNewWindow();
_auditLogForm.Show(dockPanel, rectFloat);
}
}
}
private AuditLogForm CreateAuditLogForm()
{
if (_auditLogForm == null)
{
_auditLogForm = AuditLogForm.MakeAuditLogForm(this);
_auditLogForm.FormClosed += _auditLogForm_FormClosed;
}
return _auditLogForm;
}
private void DestroyAuditLogForm()
{
if (_auditLogForm != null)
{
_auditLogForm.FormClosed -= _auditLogForm_FormClosed;
_auditLogForm.Close();
_auditLogForm = null;
}
}
private void auditLogMenuItem_Click(object sender, EventArgs e)
{
ShowAuditLog();
}
private void _auditLogForm_FormClosed(object sender, FormClosedEventArgs e)
{
_auditLogForm = null;
}
public void ClearAuditLog()
{
if (!Document.AuditLog.AuditLogEntries.IsRoot)
{
ModifyDocument(AuditLogStrings.AuditLogForm__clearLogButton_Click_Clear_audit_log,
document => document.ChangeAuditLog(AuditLogEntry.ROOT), docPair => AuditLogEntry.ClearLogEntry(docPair.OldDoc));
}
}
public AuditLogForm AuditLogForm
{
get { return _auditLogForm; }
}
#endregion
#region Graph layout
private const double MAX_TILED_ASPECT_RATIO = 2;
private void arrangeTiledMenuItem_Click(object sender, EventArgs e)
{
ArrangeGraphsTiled();
}
public void ArrangeGraphsTiled()
{
ArrangeGraphs(DisplayGraphsType.Tiled);
}
private void arrangeRowMenuItem_Click(object sender, EventArgs e)
{
ArrangeGraphs(DisplayGraphsType.Row);
}
private void arrangeColumnMenuItem_Click(object sender, EventArgs e)
{
ArrangeGraphs(DisplayGraphsType.Column);
}
public void ArrangeGraphs(DisplayGraphsType displayGraphsType)
{
var listGraphs = GetArrangeableGraphs();
if (listGraphs.Count < 2)
return;
using (new DockPanelLayoutLock(dockPanel, true))
{
ArrangeGraphsGrouped(listGraphs, listGraphs.Count, GroupGraphsType.separated, displayGraphsType);
}
}
private void arrangeTabbedMenuItem_Click(object sender, EventArgs e)
{
ArrangeGraphsTabbed();
}
public void ArrangeGraphsTabbed()
{
var listGraphs = GetArrangeableGraphs();
if (listGraphs.Count < 2)
return;
using (new DockPanelLayoutLock(dockPanel, true))
{
ArrangeGraphsTabbed(listGraphs);
}
}
private void arrangeGroupedMenuItem_Click(object sender, EventArgs e)
{
ArrangeGraphsGrouped();
}
public void ArrangeGraphsGrouped()
{
var order = Helpers.ParseEnum(Settings.Default.ArrangeGraphsOrder, GroupGraphsOrder.Position);
bool reversed = Settings.Default.ArrangeGraphsReversed;
var listGraphs = GetArrangeableGraphs(order, reversed);
using (var dlg = new ArrangeGraphsGroupedDlg(listGraphs.Count))
{
if (dlg.ShowDialog(this) == DialogResult.OK)
{
if (order != dlg.GroupOrder || reversed != dlg.Reversed)
listGraphs = GetArrangeableGraphs(dlg.GroupOrder, dlg.Reversed);
if (listGraphs.Count < 2)
return;
using (new DockPanelLayoutLock(dockPanel, true))
{
ArrangeGraphsGrouped(listGraphs, dlg.Groups, dlg.GroupType, dlg.DisplayType);
}
}
}
}
private void ArrangeGraphsGrouped(IList<DockableForm> listGraphs, int groups, GroupGraphsType groupType, DisplayGraphsType displayType)
{
// First just arrange everything into a single pane
ArrangeGraphsTabbed(listGraphs);
// Figure out how to distribute the panes into rows and columns
var documentPane = FindPane(listGraphs[0]);
double width = documentPane.Width;
double height = documentPane.Height;
int rows;
if (displayType == DisplayGraphsType.Row)
{
rows = 1;
}
else if (displayType == DisplayGraphsType.Column)
{
rows = groups;
}
else
{
rows = 1;
while ((height / rows) / (width / (groups / rows + (groups % rows > 0 ? 1 : 0))) > MAX_TILED_ASPECT_RATIO)
rows++;
}
int longRows = groups % rows;
int columnsShort = groups / rows;
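// Worked example (illustrative numbers, not taken from this code): with
// groups = 5 in a 400x400 pane, rows = 1 gives an aspect ratio of
// (400/1)/(400/5) = 5 > MAX_TILED_ASPECT_RATIO, while rows = 2 gives
// (400/2)/(400/3) = 1.5, so the 5 groups tile as 2 rows: one long row of
// columnsShort + 1 = 3 columns and one short row of columnsShort = 2.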
// Distribute the forms into lists representing rows, columns, and groups
var listTiles = new List<List<List<DockableForm>>>();
if (groupType == GroupGraphsType.distributed)
{
// As if dealing a card deck over the groups
int iForm = 0;
int forms = listGraphs.Count;
while (iForm < listGraphs.Count)
{
for (int iRow = 0; iRow < rows; iRow++)
{
if (listTiles.Count <= iRow)
listTiles.Add(new List<List<DockableForm>>());
var rowTiles = listTiles[iRow];
int columns = columnsShort + (iRow < longRows ? 1 : 0);
for (int iCol = 0; iCol < columns && iForm < forms; iCol++)
{
if (rowTiles.Count <= iCol)
rowTiles.Add(new List<DockableForm>());
var tabbedForms = rowTiles[iCol];
tabbedForms.Add(listGraphs[iForm++]);
}
}
}
}
else
{
// Filling each group before continuing to the next
int count = listGraphs.Count;
int longGroups = count % groups;
int tabsShort = count / groups;
for (int iRow = 0, iGroup = 0, iForm = 0; iRow < rows; iRow++)
{
var rowTiles = new List<List<DockableForm>>();
listTiles.Add(rowTiles);
int columns = columnsShort + (iRow < longRows ? 1 : 0);
for (int iCol = 0; iCol < columns; iCol++)
{
var tabbedForms = new List<DockableForm>();
rowTiles.Add(tabbedForms);
int tabs = tabsShort + (iGroup++ < longGroups ? 1 : 0);
for (int iTab = 0; iTab < tabs; iTab++)
{
tabbedForms.Add(listGraphs[iForm++]);
}
}
}
}
// Place the forms in the dock panel
// Rows first
for (int i = 1; i < rows; i++)
{
PlacePane(i, 0, rows, DockPaneAlignment.Bottom, listTiles);
}
// Then columns in the rows
for (int i = 0; i < rows; i++)
{
int columns = listTiles[i].Count;
for (int j = 1; j < columns; j++)
{
PlacePane(i, j, columns, DockPaneAlignment.Right, listTiles);
}
}
}
private void PlacePane(int row, int col, int count,
DockPaneAlignment alignment, IList<List<List<DockableForm>>> listTiles)
{
DockableForm previousForm = alignment == DockPaneAlignment.Bottom
? listTiles[row - 1][col][0]
: listTiles[row][col - 1][0];
DockPane previousPane = FindPane(previousForm);
var groupForms = listTiles[row][col];
var dockableForm = groupForms[0];
int dim = alignment == DockPaneAlignment.Bottom ? row : col;
dockableForm.Show(previousPane, alignment,
((double)(count - dim)) / (count - dim + 1));
ArrangeGraphsTabbed(groupForms);
}
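// Note on the split fraction above (illustrative): with 3 rows, row 1 takes
// (3-1)/(3-1+1) = 2/3 of the document pane and row 2 then takes
// (3-2)/(3-2+1) = 1/2 of what remains, leaving each of the three rows an
// equal 1/3 of the original height.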
private void ArrangeGraphsTabbed(IList<DockableForm> groupForms)
{
if (groupForms.Count < 2)
return;
DockPane primaryPane = FindPane(groupForms[0]);
for (int i = 1; i < groupForms.Count; i++)
groupForms[i].Show(primaryPane, null);
}
private List<DockableForm> GetArrangeableGraphs()
{
var order = Helpers.ParseEnum(Settings.Default.ArrangeGraphsOrder, GroupGraphsOrder.Position);
return GetArrangeableGraphs(order, Settings.Default.ArrangeGraphsReversed);
}
private List<DockableForm> GetArrangeableGraphs(GroupGraphsOrder order, bool reversed)
{
List<DockPane> listPanes = dockPanel.Panes
.Where(pane => !pane.IsHidden && pane.DockState == DockState.Document)
.ToList();
if (order == GroupGraphsOrder.Position)
{
listPanes.Sort((p1, p2) =>
{
if (p1.Top != p2.Top)
return p1.Top - p2.Top;
return p1.Left - p2.Left;
});
if (reversed)
listPanes.Reverse();
}
var listGraphs = new List<DockableForm>();
foreach (var pane in listPanes)
{
IEnumerable<IDockableForm> listForms = pane.Contents;
if (order == GroupGraphsOrder.Position && reversed)
listForms = listForms.Reverse();
foreach (DockableForm dockableForm in listForms)
{
if (dockableForm.IsHidden || dockableForm.DockState != DockState.Document)
continue;
listGraphs.Add(dockableForm);
}
}
if (order != GroupGraphsOrder.Position)
{
// Populate a dictionary with the desired document order
var dictOrder = new Dictionary<DockableForm, int>();
int iOrder = 0;
if (_graphSpectrum != null)
dictOrder.Add(_graphSpectrum, iOrder++);
_listGraphRetentionTime.ForEach(g => dictOrder.Add(g, iOrder++));
_listGraphPeakArea.ForEach(g => dictOrder.Add(g, iOrder++));
if (DocumentUI.Settings.HasResults)
{
var chromatograms = DocumentUI.Settings.MeasuredResults.Chromatograms.ToList();
if (order == GroupGraphsOrder.Acquired_Time)
{
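// Order chromatogram sets by acquisition start time; sets with no
// known run start time sort to the end of the list.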
chromatograms.Sort((c1, c2) =>
{
var time1 = GetRunStartTime(c1);
var time2 = GetRunStartTime(c2);
if (!time1.HasValue && !time2.HasValue)
{
return 0;
}
else if (!time1.HasValue)
{
return 1;
}
else if (!time2.HasValue)
{
return -1;
}
return time1.Value.CompareTo(time2.Value);
});
}
foreach (var chromatogramSet in chromatograms)
{
var graphChrom = GetGraphChrom(chromatogramSet.Name);
if (graphChrom != null)
dictOrder.Add(graphChrom, iOrder++);
}
}
// Make sure everything is represented, though it should
// already be.
foreach (var graph in listGraphs)
{
int i;
if (!dictOrder.TryGetValue(graph, out i))
dictOrder.Add(graph, iOrder++);
}
// Sort the list of visible document panes by the document order
// in the dictionary
listGraphs.Sort((g1, g2) => dictOrder[g1] - dictOrder[g2]);
if (reversed)
listGraphs.Reverse();
}
return listGraphs;
}
public DateTime? GetRunStartTime(ChromatogramSet chromatogramSet)
{
DateTime? runStartTime = null;
foreach (var fileInfo in chromatogramSet.MSDataFileInfos)
{
if (!fileInfo.RunStartTime.HasValue)
{
continue;
}
if (!runStartTime.HasValue || runStartTime.Value > fileInfo.RunStartTime.Value)
{
runStartTime = fileInfo.RunStartTime;
}
}
return runStartTime;
}
#endregion
}
}
| 1 | 13,608 | Better if you give split[0], split[1], and split[2] descriptive names than using them this much through so many lines of code. | ProteoWizard-pwiz | .cs |
@@ -41,8 +41,6 @@ export const getSetupIncompleteComponent = ( module, inGrid = false, fullWidth =
return ctaWrapper( cta, inGrid, fullWidth, createGrid );
};
-export default getSetupIncompleteComponent;
-
/**
* Creates a CTA component when module needs to be activated. Different wrapper HTML is needed depending on where the CTA gets output, which is determined by the inGrid, fullWidth, and createGrid parameters.
* | 1 |
/**
* `getSetupIncompleteComponent` function.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import ctaWrapper from './cta-wrapper';
import CompleteModuleActivationCTA from '../CompleteModuleActivationCTA';
import ActivateModuleCTA from '../ActivateModuleCTA';
/**
* Creates a CTA component when module needs to be configured. Different wrapper HTML is needed depending on where the CTA gets output, which is determined by the inGrid, fullWidth, and createGrid parameters.
*
* @since 1.0.0
*
* @param {string} module Module slug.
* @param {boolean} inGrid Creates layout to fit within an existing grid with 'cell' classes. Default is half-width grid cells. Default: false.
* @param {boolean} fullWidth Creates layout with 'cell--span-12' to be full width. Default: false.
* @param {boolean} createGrid Adds a full grid layout with padding. Default: false.
* @return {WPElement} Returns CTA component with configuration CTA.
*/
export const getSetupIncompleteComponent = ( module, inGrid = false, fullWidth = false, createGrid = false ) => {
const cta = <CompleteModuleActivationCTA moduleSlug={ module } />;
return ctaWrapper( cta, inGrid, fullWidth, createGrid );
};
export default getSetupIncompleteComponent;
/**
* Creates a CTA component when module needs to be activated. Different wrapper HTML is needed depending on where the CTA gets output, which is determined by the inGrid, fullWidth, and createGrid parameters.
*
* @since 1.26.0
*
* @param {string} module Module slug.
* @param {boolean} inGrid Creates layout to fit within an existing grid with 'cell' classes. Default is half-width grid cells. Default: false.
* @param {boolean} fullWidth Creates layout with 'cell--span-12' to be full width. Default: false.
* @param {boolean} createGrid Adds a full grid layout with padding. Default: false.
* @return {WPElement} Returns CTA component with activation CTA.
*/
export const getModuleInactiveComponent = ( module, inGrid = false, fullWidth = false, createGrid = false ) => {
const cta = <ActivateModuleCTA moduleSlug={ module } />;
return ctaWrapper( cta, inGrid, fullWidth, createGrid );
};
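// Usage sketch (hypothetical call site; the 'analytics' slug is only an
// example, not part of this file): render an activation CTA for an
// inactive module inside an existing grid, using half-width cells.
// const cta = getModuleInactiveComponent( 'analytics', true );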
| 1 | 40,129 | I know we're deleting this soon but I thought I'd fix it anyway :smile: | google-site-kit-wp | js |
@@ -58,9 +58,9 @@ public final class Slf4jConstantLogMessage extends BugChecker implements MethodI
List<? extends ExpressionTree> args = tree.getArguments();
ExpressionTree messageArg = ASTHelpers.isCastable(
- ASTHelpers.getType(tree.getArguments().get(0)),
- state.getTypeFromString("org.slf4j.Marker"),
- state)
+ ASTHelpers.getType(tree.getArguments().get(0)),
+ state.getTypeFromString("org.slf4j.Marker"),
+ state)
? args.get(1)
: args.get(0);
| 1 | /*
* (c) Copyright 2017 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.errorprone;
import com.google.auto.service.AutoService;
import com.google.errorprone.BugPattern;
import com.google.errorprone.BugPattern.SeverityLevel;
import com.google.errorprone.VisitorState;
import com.google.errorprone.bugpatterns.BugChecker;
import com.google.errorprone.bugpatterns.BugChecker.MethodInvocationTreeMatcher;
import com.google.errorprone.matchers.CompileTimeConstantExpressionMatcher;
import com.google.errorprone.matchers.Description;
import com.google.errorprone.matchers.Matcher;
import com.google.errorprone.matchers.method.MethodMatchers;
import com.google.errorprone.util.ASTHelpers;
import com.sun.source.tree.ExpressionTree;
import com.sun.source.tree.MethodInvocationTree;
import java.util.List;
import java.util.regex.Pattern;
@AutoService(BugChecker.class)
@BugPattern(
name = "Slf4jConstantLogMessage",
link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks",
linkType = BugPattern.LinkType.CUSTOM,
severity = SeverityLevel.ERROR,
summary = "Allow only compile-time constant slf4j log message strings.")
public final class Slf4jConstantLogMessage extends BugChecker implements MethodInvocationTreeMatcher {
private static final long serialVersionUID = 1L;
private static final Matcher<ExpressionTree> LOG_METHOD = MethodMatchers.instanceMethod()
.onDescendantOf("org.slf4j.Logger")
.withNameMatching(Pattern.compile("trace|debug|info|warn|error"));
private final Matcher<ExpressionTree> compileTimeConstExpressionMatcher =
new CompileTimeConstantExpressionMatcher();
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (!LOG_METHOD.matches(tree, state)) {
return Description.NO_MATCH;
}
List<? extends ExpressionTree> args = tree.getArguments();
ExpressionTree messageArg = ASTHelpers.isCastable(
ASTHelpers.getType(tree.getArguments().get(0)),
state.getTypeFromString("org.slf4j.Marker"),
state)
? args.get(1)
: args.get(0);
if (compileTimeConstExpressionMatcher.matches(messageArg, state)) {
return Description.NO_MATCH;
}
return buildDescription(tree).setMessage("slf4j log statement uses a non-constant expression").build();
}
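// Illustrative call sites (hypothetical, not from this source): the matcher
// above reports a message built by non-constant concatenation, e.g.
// log.info("user " + userId + " failed"),
// while a compile-time constant format string with arguments passes, e.g.
// log.info("user {} failed", userId).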
}
| 1 | 7,746 | I like this, makes it clearer what scope the continuation is in | palantir-gradle-baseline | java |
@@ -367,12 +367,12 @@ describe('MergeCells', () => {
keyDownUp('tab');
keyDownUp('enter');
- expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
+ expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('A1');
keyDownUp('tab');
keyDownUp('enter');
- expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
+ expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('B1');
});
it('should select the cell in the top-left corner of the merged cell, when navigating down using the SHIFT + ENTER key on the' + | 1 | describe('MergeCells', () => {
let id = 'testContainer';
beforeEach(function() {
this.$container = $(`<div id="${id}"></div>`).appendTo('body');
});
afterEach(function() {
if (this.$container) {
destroy();
this.$container.remove();
}
});
describe('initialization', () => {
it('should merge cell in startup', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 5),
mergeCells: [
{row: 0, col: 0, rowspan: 2, colspan: 2}
]
});
let TD = hot.rootElement.querySelector('td');
expect(TD.getAttribute('rowspan')).toBe('2');
expect(TD.getAttribute('colspan')).toBe('2');
});
});
describe('methods', () => {
it('should clear merged cells collection without throw an exception', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(50, 1),
width: 100,
height: 100,
mergeCells: [
{row: 0, col: 0, rowspan: 2, colspan: 1},
{row: 4, col: 0, rowspan: 30, colspan: 1},
{row: 48, col: 0, rowspan: 2, colspan: 1},
],
});
expect(() => {
hot.getPlugin('mergeCells').clearCollections();
}).not.toThrow();
});
});
describe('mergeCells updateSettings', () => {
it('should allow to overwrite the initial settings using the updateSettings method', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 10),
mergeCells: [
{row: 0, col: 0, rowspan: 2, colspan: 2}
]
});
let TD = hot.rootElement.querySelector('td');
expect(TD.getAttribute('rowspan')).toBe('2');
expect(TD.getAttribute('colspan')).toBe('2');
updateSettings({
mergeCells: [
{row: 2, col: 2, rowspan: 2, colspan: 2}
]
});
TD = hot.rootElement.querySelector('td');
expect(TD.getAttribute('rowspan')).toBe(null);
expect(TD.getAttribute('colspan')).toBe(null);
TD = getCell(2, 2);
expect(TD.getAttribute('rowspan')).toBe('2');
expect(TD.getAttribute('colspan')).toBe('2');
});
it('should allow resetting the merged cells by changing it to an empty array', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 10),
mergeCells: [
{row: 0, col: 0, rowspan: 2, colspan: 2}
]
});
let TD = hot.rootElement.querySelector('td');
expect(TD.getAttribute('rowspan')).toBe('2');
expect(TD.getAttribute('colspan')).toBe('2');
updateSettings({
mergeCells: []
});
TD = hot.rootElement.querySelector('td');
expect(TD.getAttribute('rowspan')).toBe(null);
expect(TD.getAttribute('colspan')).toBe(null);
});
it('should allow resetting and turning off the mergeCells plugin by changing mergeCells to \'false\'', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 10),
mergeCells: [
{row: 0, col: 0, rowspan: 2, colspan: 2}
]
});
let TD = hot.rootElement.querySelector('td');
expect(TD.getAttribute('rowspan')).toBe('2');
expect(TD.getAttribute('colspan')).toBe('2');
updateSettings({
mergeCells: false
});
TD = hot.rootElement.querySelector('td');
expect(TD.getAttribute('rowspan')).toBe(null);
expect(TD.getAttribute('colspan')).toBe(null);
});
});
describe('mergeCells copy', () => {
it('should not copy text of cells that are merged into another cell', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 5),
mergeCells: [
{row: 0, col: 0, rowspan: 2, colspan: 2}
]
});
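// The merged A1:B2 block keeps its value only at its anchor cell (A1);
// the covered cells (B1, A2, B2) are copied as empty strings between tabs.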
expect(hot.getCopyableText(0, 0, 2, 2)).toBe('A1\t\tC1\n\t\tC2\nA3\tB3\tC3');
});
});
describe('merged cells selection', () => {
it('should select the whole range of cells which form a merged cell', function() {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(4, 4),
mergeCells: [
{
row: 0,
col: 0,
colspan: 4,
rowspan: 1
}
]
});
let $table = this.$container.find('table.htCore');
let $td = $table.find('tr:eq(0) td:eq(0)');
expect($td.attr('rowspan')).toEqual('1');
expect($td.attr('colspan')).toEqual('4');
expect(hot.getSelectedLast()).toBeUndefined();
hot.selectCell(0, 0);
expect(hot.getSelectedLast()).toEqual([0, 0, 0, 3]);
deselectCell();
hot.selectCell(0, 1);
expect(hot.getSelectedLast()).toEqual([0, 0, 0, 3]);
});
it('should always make a rectangular selection, when selecting merged and not merged cells', function() {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(4, 4),
mergeCells: [
{
row: 1,
col: 1,
colspan: 3,
rowspan: 2
}
]
});
let $table = this.$container.find('table.htCore');
let $td = $table.find('tr:eq(1) td:eq(1)');
expect($td.attr('rowspan')).toEqual('2');
expect($td.attr('colspan')).toEqual('3');
expect(hot.getSelectedLast()).toBeUndefined();
hot.selectCell(0, 0);
expect(hot.getSelectedLast()).toEqual([0, 0, 0, 0]);
deselectCell();
hot.selectCell(0, 0, 1, 1);
expect(hot.getSelectedLast()).not.toEqual([0, 0, 1, 1]);
expect(hot.getSelectedLast()).toEqual([0, 0, 2, 3]);
deselectCell();
hot.selectCell(0, 1, 1, 1);
expect(hot.getSelectedLast()).toEqual([0, 1, 2, 3]);
});
it('should not switch the selection start point when selecting from non-merged cells to merged cells', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 10),
mergeCells: [
{row: 1, col: 1, rowspan: 3, colspan: 3},
{row: 3, col: 4, rowspan: 2, colspan: 2}
]
});
$(hot.getCell(6, 6)).simulate('mousedown');
expect(hot.getSelectedRangeLast().from.col).toEqual(6);
expect(hot.getSelectedRangeLast().from.row).toEqual(6);
$(hot.getCell(1, 1)).simulate('mouseenter');
expect(hot.getSelectedRangeLast().from.col).toEqual(6);
expect(hot.getSelectedRangeLast().from.row).toEqual(6);
$(hot.getCell(3, 3)).simulate('mouseenter');
expect(hot.getSelectedRangeLast().from.col).toEqual(6);
expect(hot.getSelectedRangeLast().from.row).toEqual(6);
$(hot.getCell(4, 4)).simulate('mouseenter');
expect(hot.getSelectedRangeLast().from.col).toEqual(6);
expect(hot.getSelectedRangeLast().from.row).toEqual(6);
});
it('should select cells in the correct direction when changing selections around a merged range', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 10),
mergeCells: [
{row: 4, col: 4, rowspan: 2, colspan: 2}
]
});
hot.selectCell(5, 5, 5, 2);
expect(hot.getSelectedRangeLast().getDirection()).toEqual('SE-NW');
hot.selectCell(4, 4, 2, 5);
expect(hot.getSelectedRangeLast().getDirection()).toEqual('SW-NE');
hot.selectCell(4, 4, 5, 7);
expect(hot.getSelectedRangeLast().getDirection()).toEqual('NW-SE');
hot.selectCell(4, 5, 7, 5);
expect(hot.getSelectedRangeLast().getDirection()).toEqual('NE-SW');
});
it('should not add an area class to the selected cell if a single merged cell is selected', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(6, 6),
mergeCells: [
{
row: 1,
col: 1,
colspan: 3,
rowspan: 2
}
]
});
selectCell(1, 1);
expect(getCell(1, 1).className.indexOf('area')).toEqual(-1);
selectCell(1, 1, 4, 4);
expect(getCell(1, 1).className.indexOf('area')).not.toEqual(-1);
selectCell(1, 1);
expect(getCell(1, 1).className.indexOf('area')).toEqual(-1);
selectCell(0, 0);
expect(getCell(1, 1).className.indexOf('area')).toEqual(-1);
});
it('should render fill handle after merge cells', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: true
});
const plugin = hot.getPlugin('mergeCells');
hot.selectCell(0, 0, 2, 2);
plugin.mergeSelection();
expect(spec().$container.find('.wtBorder.current.corner:visible').length).toEqual(1);
});
it('should render fill handle when merge cells is highlighted cell in right bottom corner', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{ row: 2, col: 2, rowspan: 2, colspan: 2 }
]
});
hot.selectCell(2, 2, 1, 1);
expect(spec().$container.find('.wtBorder.corner:visible').length).toEqual(1);
});
it('should render fill handle when cell in right bottom corner is a merged cell', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{ row: 2, col: 2, rowspan: 2, colspan: 2 }
]
});
hot.selectCell(1, 1, 2, 2);
expect(spec().$container.find('.wtBorder.corner:visible').length).toEqual(1);
});
it('should select the cell in the top-left corner of the merged cell, when navigating down using the ENTER key on the' +
' bottom edge of the table', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{ row: 8, col: 8, rowspan: 2, colspan: 2 }
]
});
hot.setDataAtCell(8, 8, 'top-left-corner!');
hot.selectCell(7, 9);
keyDownUp('enter');
keyDownUp('enter');
keyDownUp('enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
keyDownUp('enter');
keyDownUp('enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
keyDownUp('enter');
keyDownUp('enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
});
it('should select the cell in the top-left corner of the merged cell, when navigating down using the TAB key on the' +
' bottom edge of the table', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{ row: 8, col: 8, rowspan: 2, colspan: 2 }
]
});
hot.setDataAtCell(8, 8, 'top-left-corner!');
hot.selectCell(9, 7);
keyDownUp('enter');
keyDownUp('tab');
keyDownUp('enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
keyDownUp('tab');
keyDownUp('enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
keyDownUp('tab');
keyDownUp('enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
});
it('should select the cell in the top-left corner of the merged cell, when navigating down using the SHIFT + ENTER key on the' +
' top edge of the table', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{ row: 0, col: 0, rowspan: 2, colspan: 2 }
]
});
hot.setDataAtCell(0, 0, 'top-left-corner!');
hot.selectCell(2, 1);
keyDownUp('shift+enter');
keyDownUp('shift+enter');
keyDownUp('shift+enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
keyDownUp('shift+enter');
keyDownUp('shift+enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
keyDownUp('shift+enter');
keyDownUp('shift+enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
});
it('should select the cell in the top-left corner of the merged cell, when navigating down using the SHIFT + TAB key on the' +
' top edge of the table', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{ row: 0, col: 0, rowspan: 2, colspan: 2 }
]
});
hot.setDataAtCell(0, 0, 'top-left-corner!');
hot.selectCell(1, 2);
keyDownUp('shift+enter');
keyDownUp('shift+tab');
keyDownUp('shift+enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
keyDownUp('shift+tab');
keyDownUp('shift+enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
keyDownUp('shift+tab');
keyDownUp('shift+enter');
expect(spec().$container.find('.handsontableInputHolder textarea').val()).toEqual('top-left-corner!');
keyDownUp('shift+enter');
});
});
describe('merged cells scroll', () => {
it('getCell should return merged cell parent', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 5),
mergeCells: [
{row: 0, col: 0, rowspan: 2, colspan: 2}
],
height: 100,
width: 400
});
let mergedCellParent = hot.getCell(0, 0);
let mergedCellHidden = hot.getCell(1, 1);
expect(mergedCellHidden).toBe(mergedCellParent);
});
it('should scroll viewport to beginning of a merged cell when it\'s clicked', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 5),
mergeCells: [
{row: 5, col: 0, rowspan: 2, colspan: 2}
],
height: 100,
width: 400
});
let mainHolder = hot.view.wt.wtTable.holder;
mainHolder.scrollTop = 130;
hot.render();
expect(mainHolder.scrollTop).toBe(130);
let TD = hot.getCell(5, 0);
mouseDown(TD);
mouseUp(TD);
let mergedCellScrollTop = mainHolder.scrollTop;
expect(mergedCellScrollTop).toBeLessThan(130);
expect(mergedCellScrollTop).toBeGreaterThan(0);
mainHolder.scrollTop = 0;
hot.render();
mainHolder.scrollTop = 130;
hot.render();
TD = hot.getCell(5, 2);
mouseDown(TD);
mouseUp(TD);
let regularCellScrollTop = mainHolder.scrollTop;
expect(mergedCellScrollTop).toBe(regularCellScrollTop);
});
it('should render whole merged cell even when most rows are not in the viewport - scrolled to top', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(40, 5),
mergeCells: [
{row: 1, col: 0, rowspan: 21, colspan: 2},
{row: 21, col: 2, rowspan: 18, colspan: 2}
],
height: 100,
width: 400
});
expect(hot.countRenderedRows()).toBe(39);
});
it('should render whole merged cell even when most rows are not in the viewport - scrolled to bottom', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(40, 5),
mergeCells: [
{row: 1, col: 0, rowspan: 21, colspan: 2},
{row: 21, col: 2, rowspan: 18, colspan: 2}
],
height: 100,
width: 400
});
let mainHolder = hot.view.wt.wtTable.holder;
$(mainHolder).scrollTop(99999);
hot.render();
expect(hot.countRenderedRows()).toBe(39);
});
it('should render whole merged cell even when most columns are not in the viewport - scrolled to the left', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(5, 40),
mergeCells: [
{row: 0, col: 1, rowspan: 2, colspan: 21},
{row: 2, col: 21, rowspan: 2, colspan: 18}
],
height: 100,
width: 400
});
expect(hot.countRenderedCols()).toBe(39);
});
it('should render whole merged cell even when most columns are not in the viewport - scrolled to the right', function() {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(5, 40),
mergeCells: [
{row: 0, col: 1, rowspan: 2, colspan: 21},
{row: 2, col: 21, rowspan: 2, colspan: 18}
],
height: 100,
width: 400
});
this.$container.scrollLeft(99999);
hot.render();
expect(hot.countRenderedCols()).toBe(39);
});
});
describe('merge cells shift', () => {
it('should shift the merged cells right, when inserting a column on the left side of them', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: [
{row: 1, col: 1, rowspan: 2, colspan: 2},
{row: 2, col: 5, rowspan: 2, colspan: 2}
],
height: 400,
width: 400
});
hot.alter('insert_col', 3, 2);
let plugin = hot.getPlugin('mergeCells');
let mergedCellsCollection = plugin.mergedCellsCollection.mergedCells;
expect(mergedCellsCollection[0].col).toEqual(1);
expect(mergedCellsCollection[1].col).toEqual(7);
});
it('should shift the merged cells left, when removing a column on the left side of them', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: [
{row: 1, col: 1, rowspan: 2, colspan: 2},
{row: 2, col: 5, rowspan: 2, colspan: 2}
],
height: 400,
width: 400
});
hot.alter('remove_col', 3, 2);
let plugin = hot.getPlugin('mergeCells');
let mergedCellsCollection = plugin.mergedCellsCollection.mergedCells;
expect(mergedCellsCollection[0].col).toEqual(1);
expect(mergedCellsCollection[1].col).toEqual(3);
});
it('should shift the merged cells down, when inserting rows above them', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: [
{row: 1, col: 1, rowspan: 2, colspan: 2},
{row: 5, col: 5, rowspan: 2, colspan: 2}
],
height: 400,
width: 400
});
hot.alter('insert_row', 3, 2);
let plugin = hot.getPlugin('mergeCells');
let mergedCellsCollection = plugin.mergedCellsCollection.mergedCells;
expect(mergedCellsCollection[0].row).toEqual(1);
expect(mergedCellsCollection[1].row).toEqual(7);
});
it('should shift the merged cells up, when removing rows above them', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: [
{row: 1, col: 1, rowspan: 2, colspan: 2},
{row: 5, col: 5, rowspan: 2, colspan: 2}
],
height: 400,
width: 400
});
hot.alter('remove_row', 3, 2);
let plugin = hot.getPlugin('mergeCells');
let mergedCellsCollection = plugin.mergedCellsCollection.mergedCells;
expect(mergedCellsCollection[0].row).toEqual(1);
expect(mergedCellsCollection[1].row).toEqual(3);
});
it('should trim the merged cell\'s height, when removing rows between its start and end', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: [
{row: 1, col: 1, rowspan: 5, colspan: 3}
],
height: 400,
width: 400
});
hot.alter('remove_row', 2, 2);
let plugin = hot.getPlugin('mergeCells');
let mergedCellsCollection = plugin.mergedCellsCollection.mergedCells;
expect(mergedCellsCollection[0].row).toEqual(1);
expect(mergedCellsCollection[0].rowspan).toEqual(3);
plugin.mergedCellsCollection.clear();
plugin.merge(1, 1, 2, 2);
hot.alter('remove_row', 2, 2);
expect(mergedCellsCollection[0].row).toEqual(1);
expect(mergedCellsCollection[0].rowspan).toEqual(1);
});
it('should trim the merged cell\'s width, when removing columns between its start and end', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: [
{row: 1, col: 1, rowspan: 3, colspan: 5}
],
height: 400,
width: 400
});
hot.alter('remove_col', 2, 2);
let plugin = hot.getPlugin('mergeCells');
let mergedCellsCollection = plugin.mergedCellsCollection.mergedCells;
expect(mergedCellsCollection[0].col).toEqual(1);
expect(mergedCellsCollection[0].colspan).toEqual(3);
plugin.mergedCellsCollection.clear();
plugin.merge(1, 1, 2, 2);
hot.alter('remove_col', 2, 2);
expect(mergedCellsCollection[0].col).toEqual(1);
expect(mergedCellsCollection[0].colspan).toEqual(1);
});
it('should shift the `row` of a merged cell, when removing rows it consists of', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: [
{row: 5, col: 5, rowspan: 5, colspan: 3}
],
height: 400,
width: 400
});
hot.alter('remove_row', 4, 3);
let plugin = hot.getPlugin('mergeCells');
let mergedCellsCollection = plugin.mergedCellsCollection.mergedCells;
expect(mergedCellsCollection[0].row).toEqual(4);
expect(mergedCellsCollection[0].rowspan).toEqual(3);
plugin.mergedCellsCollection.clear();
plugin.merge(1, 1, 2, 2);
hot.alter('remove_row', 0, 2);
expect(mergedCellsCollection[0].row).toEqual(0);
expect(mergedCellsCollection[0].rowspan).toEqual(1);
plugin.mergedCellsCollection.clear();
plugin.merge(1, 1, 2, 2);
hot.alter('remove_row', 1, 1);
expect(mergedCellsCollection[0].row).toEqual(1);
expect(mergedCellsCollection[0].rowspan).toEqual(1);
});
it('should shift the `col` of a merged cell, when removing columns it consists of', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: [
{row: 5, col: 5, rowspan: 3, colspan: 5}
],
height: 400,
width: 400
});
hot.alter('remove_col', 4, 3);
let plugin = hot.getPlugin('mergeCells');
let mergedCellsCollection = plugin.mergedCellsCollection.mergedCells;
expect(mergedCellsCollection[0].col).toEqual(4);
expect(mergedCellsCollection[0].colspan).toEqual(3);
plugin.mergedCellsCollection.clear();
plugin.merge(1, 1, 2, 2);
hot.alter('remove_col', 0, 2);
expect(mergedCellsCollection[0].col).toEqual(0);
expect(mergedCellsCollection[0].colspan).toEqual(1);
plugin.mergedCellsCollection.clear();
plugin.merge(1, 1, 2, 2);
hot.alter('remove_col', 1, 1);
expect(mergedCellsCollection[0].col).toEqual(1);
expect(mergedCellsCollection[0].colspan).toEqual(1);
});
it('should allow removing multiple merged cells, while removing multiple rows', () => {
const errorSpy = spyOn(console, 'error');
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: [
{row: 0, col: 0, rowspan: 2, colspan: 2},
{row: 5, col: 5, rowspan: 3, colspan: 3}
],
height: 400,
width: 400
});
hot.alter('remove_row', 0, 10);
expect(errorSpy).not.toHaveBeenCalled();
});
});
describe('merged cell candidates validation', () => {
it('should check if the provided merged cell information object contains negative values, and if so, do not add it ' +
'to the collection and throw an appropriate warning', () => {
const warnSpy = spyOn(console, 'warn');
const newMergedCells = [
{
row: 0,
col: 1,
rowspan: 3,
colspan: 4
},
{
row: -5,
col: 8,
rowspan: 3,
colspan: 4
},
{
row: 20,
col: -21,
rowspan: 3,
colspan: 4
},
{
row: 200,
col: 210,
rowspan: -3,
colspan: 4
},
{
row: 220,
col: 220,
rowspan: 3,
colspan: -4
}];
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: newMergedCells
});
expect(warnSpy).toHaveBeenCalledWith('The merged cell declared with {row: -5, col: 8, rowspan: 3, colspan: 4} ' +
'contains negative values, which is not supported. It will not be added to the collection.');
expect(warnSpy).toHaveBeenCalledWith('The merged cell declared with {row: 20, col: -21, rowspan: 3, colspan: 4} ' +
'contains negative values, which is not supported. It will not be added to the collection.');
expect(warnSpy).toHaveBeenCalledWith('The merged cell declared with {row: 200, col: 210, rowspan: -3, colspan: 4} ' +
'contains negative values, which is not supported. It will not be added to the collection.');
expect(warnSpy).toHaveBeenCalledWith('The merged cell declared with {row: 220, col: 220, rowspan: 3, colspan: -4} ' +
'contains negative values, which is not supported. It will not be added to the collection.');
expect(hot.getPlugin('mergeCells').mergedCellsCollection.mergedCells.length).toEqual(1);
});
it('should check if the provided merged cell information object has rowspan and colspan declared as 0, and if so, do not add it ' +
'to the collection and throw an appropriate warning', () => {
const warnSpy = spyOn(console, 'warn');
const newMergedCells = [
{
row: 0,
col: 1,
rowspan: 3,
colspan: 4
},
{
row: 6,
col: 6,
rowspan: 0,
colspan: 0
},
{
row: 9,
col: 9,
rowspan: 1,
colspan: 0
}
];
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: newMergedCells
});
expect(warnSpy).toHaveBeenCalledWith('The merged cell declared at [6, 6] has "rowspan" or ' +
'"colspan" declared as "0", which is not supported. It cannot be added to the collection.');
expect(warnSpy).toHaveBeenCalledWith('The merged cell declared at [9, 9] has "rowspan" or ' +
'"colspan" declared as "0", which is not supported. It cannot be added to the collection.');
expect(hot.getPlugin('mergeCells').mergedCellsCollection.mergedCells.length).toEqual(1);
});
it('should check if the provided merged cell information object represents a single cell, and if so, do not add it ' +
'to the collection and throw an appropriate warning', () => {
const warnSpy = spyOn(console, 'warn');
const newMergedCells = [
{
row: 0,
col: 1,
rowspan: 3,
colspan: 4
},
{
row: 5,
col: 8,
rowspan: 1,
colspan: 1
},
{
row: 20,
col: 21,
rowspan: 3,
colspan: 4
}
];
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(30, 30),
mergeCells: newMergedCells
});
expect(warnSpy).toHaveBeenCalledWith('The merged cell declared at [5, 8] has both "rowspan" and "colspan" ' +
'declared as "1", which makes it a single cell. It cannot be added to the collection.');
expect(hot.getPlugin('mergeCells').mergedCellsCollection.mergedCells.length).toEqual(2);
});
it('should check if the provided merged cell information object contains a merged cell declared out of bounds, and if so, ' +
'do not add it to the collection and throw an appropriate warning', () => {
const warnSpy = spyOn(console, 'warn');
const newMergedCells = [
{
row: 0,
col: 1,
rowspan: 3,
colspan: 4
},
{
row: 17,
col: 17,
rowspan: 5,
colspan: 5
},
{
row: 20,
col: 21,
rowspan: 3,
colspan: 4
}
];
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(20, 20),
mergeCells: newMergedCells
});
expect(warnSpy).toHaveBeenCalledWith('The merged cell declared at [17, 17] is positioned ' +
'(or positioned partially) outside of the table range. It was not added to the table, please fix your setup.');
expect(warnSpy).toHaveBeenCalledWith('The merged cell declared at [20, 21] is positioned ' +
'(or positioned partially) outside of the table range. It was not added to the table, please fix your setup.');
expect(hot.getPlugin('mergeCells').mergedCellsCollection.mergedCells.length).toEqual(1);
});
});
xdescribe('canMergeRange', () => {
it('should return false if the start and end cells are the same', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 5)
});
let mergeCells = new Handsontable.plugins.MergeCells(hot);
let result = mergeCells.canMergeRange({
from: {
row: 0, col: 1
},
to: {
row: 0, col: 1
}
});
expect(result).toBe(false);
});
it('should return true for 2 consecutive cells in the same column', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 5)
});
let mergeCells = new Handsontable.plugins.MergeCells(hot);
let result = mergeCells.canMergeRange({
from: {
row: 0, col: 1
},
to: {
row: 1, col: 1
}
});
expect(result).toBe(true);
});
it('should return true for 2 consecutive cells in the same row', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 5)
});
let mergeCells = hot.getPlugin('mergeCells');
let result = mergeCells.canMergeRange({
from: {
row: 0, col: 1
},
to: {
row: 0, col: 2
}
});
expect(result).toBe(true);
});
it('should return true for 4 neighboring cells', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 5)
});
let mergeCells = hot.getPlugin('mergeCells');
let result = mergeCells.canMergeRange({
from: {
row: 0, col: 1
},
to: {
row: 1, col: 2
}
});
expect(result).toBe(true);
});
});
xdescribe('modifyTransform', () => {
it('should not transform arrow right when entering a merged cell', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let coords = new CellCoords(1, 0);
let currentSelection = new CellRange(coords, coords, coords);
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let inDelta = new CellCoords(0, 1);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(0, 1));
});
it('should transform arrow right when leaving a merged cell', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let coords = new CellCoords(1, 1);
let currentSelection = new CellRange(coords, coords, coords);
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let inDelta = new CellCoords(0, 1);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(0, 3));
});
it('should transform arrow right when leaving a merged cell (return to desired row)', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let coords = new CellCoords(2, 0);
let currentSelection = new CellRange(coords, coords, coords);
let inDelta = new CellCoords(0, 1);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(-1, 1));
coords = new CellCoords(1, 1);
currentSelection = new CellRange(coords, coords, coords);
inDelta = new CellCoords(0, 1);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(1, 3));
});
it('should transform arrow left when entering a merged cell', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let coords = new CellCoords(1, 4);
let currentSelection = new CellRange(coords, coords, coords);
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let inDelta = new CellCoords(0, -1);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(0, -3));
});
it('should not transform arrow left when leaving a merged cell', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let coords = new CellCoords(1, 1);
let currentSelection = new CellRange(coords, coords, coords);
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let inDelta = new CellCoords(0, -1);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(0, -1));
});
it('should transform arrow left when leaving a merged cell (return to desired row)', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let coords = new CellCoords(2, 4);
let currentSelection = new CellRange(coords, coords, coords);
let inDelta = new CellCoords(0, -1);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(-1, -3));
coords = new CellCoords(1, 1);
currentSelection = new CellRange(coords, coords, coords);
inDelta = new CellCoords(0, -1);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(1, -1));
});
it('should not transform arrow down when entering a merged cell', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let coords = new CellCoords(0, 1);
let currentSelection = new CellRange(coords, coords, coords);
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let inDelta = new CellCoords(0, -1);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(0, -1));
});
it('should transform arrow down when leaving a merged cell', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let coords = new CellCoords(1, 1);
let currentSelection = new CellRange(coords, coords, coords);
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let inDelta = new CellCoords(1, 0);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(3, 0));
});
it('should transform arrow up when entering a merged cell', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let coords = new CellCoords(4, 1);
let currentSelection = new CellRange(coords, coords, coords);
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let inDelta = new CellCoords(-1, 0);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(-3, 0));
});
it('should not transform arrow up when leaving a merged cell', () => {
let mergeCellsSettings = [
{row: 1, col: 1, rowspan: 3, colspan: 3}
];
let coords = new CellCoords(1, 1);
let currentSelection = new CellRange(coords, coords, coords);
let mergeCells = new Handsontable.MergeCells(mergeCellsSettings);
let inDelta = new CellCoords(-1, 0);
mergeCells.modifyTransform('modifyTransformStart', currentSelection, inDelta);
expect(inDelta).toEqual(new CellCoords(-1, 0));
});
});
describe('ContextMenu', () => {
it('should disable `Merge cells` context menu item when context menu was triggered from corner header', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetObjectData(10, 5),
rowHeaders: true,
colHeaders: true,
contextMenu: true,
mergeCells: true,
});
$('.ht_clone_top_left_corner .htCore')
.find('thead')
.find('th')
.eq(0)
.simulate('mousedown', {which: 3});
contextMenu();
expect($('.htContextMenu tbody td.htDisabled').text()).toBe([
'Insert column left',
'Insert column right',
'Remove row',
'Remove column',
'Undo',
'Redo',
'Read only',
'Alignment',
'Merge cells',
].join(''));
});
});
describe('Validation', () => {
it('should not hide the merged cells after validating the table', (done) => {
let onAfterValidate = jasmine.createSpy('onAfterValidate');
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{row: 5, col: 4, rowspan: 2, colspan: 2},
{row: 1, col: 1, rowspan: 2, colspan: 2},
],
validator: function(query, callback) {
callback(true);
},
afterValidate: onAfterValidate
});
let firstCollection = hot.getCell(5, 4);
let secondCollection = hot.getCell(1, 1);
expect(firstCollection.style.display.indexOf('none')).toEqual(-1);
expect(secondCollection.style.display.indexOf('none')).toEqual(-1);
hot.validateCells();
setTimeout(() => {
expect(onAfterValidate).toHaveBeenCalled();
firstCollection = hot.getCell(5, 4);
secondCollection = hot.getCell(1, 1);
expect(firstCollection.style.display.indexOf('none')).toEqual(-1);
expect(secondCollection.style.display.indexOf('none')).toEqual(-1);
done();
}, 100);
});
});
describe('Entire row/column selection', () => {
it('should be possible to select a single entire column, when there\'s a merged cell in it', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{row: 5, col: 4, rowspan: 2, colspan: 5}
]
});
hot.selectCell(0, 5, 9, 5);
expect(JSON.stringify(hot.getSelectedLast())).toEqual('[0,5,9,5]');
// it should work only for selecting the entire column
hot.selectCell(4, 5, 7, 5);
expect(JSON.stringify(hot.getSelectedLast())).toEqual('[4,4,7,8]');
});
it('should be possible to select a single entire row, when there\'s a merged cell in it', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{row: 5, col: 4, rowspan: 5, colspan: 2}
]
});
hot.selectCell(5, 0, 5, 9);
expect(JSON.stringify(hot.getSelectedLast())).toEqual('[5,0,5,9]');
// it should work only for selecting the entire row
hot.selectCell(6, 3, 6, 7);
expect(JSON.stringify(hot.getSelectedLast())).toEqual('[5,3,9,7]');
});
});
describe('Undo/Redo', () => {
it('should not be possible to remove initially declared merged cells by calling the \'Undo\' action.', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: [
{row: 5, col: 4, rowspan: 2, colspan: 5},
{row: 1, col: 1, rowspan: 2, colspan: 2},
]
});
hot.undo();
expect(hot.getPlugin('mergeCells').mergedCellsCollection.mergedCells.length).toEqual(2);
});
it('should be possible to undo the merging process by calling the \'Undo\' action.', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: true
});
const plugin = hot.getPlugin('mergeCells');
plugin.merge(0, 0, 3, 3);
hot.selectCell(4, 4, 7, 7);
plugin.mergeSelection();
expect(plugin.mergedCellsCollection.mergedCells.length).toEqual(2);
hot.undo();
expect(plugin.mergedCellsCollection.mergedCells.length).toEqual(1);
hot.undo();
expect(plugin.mergedCellsCollection.mergedCells.length).toEqual(0);
});
it('should be possible to redo the merging process by calling the \'Redo\' action.', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(10, 10),
mergeCells: true
});
const plugin = hot.getPlugin('mergeCells');
plugin.merge(0, 0, 3, 3);
hot.selectCell(4, 4, 7, 7);
plugin.mergeSelection();
hot.undo();
hot.undo();
hot.redo();
expect(plugin.mergedCellsCollection.mergedCells.length).toEqual(1);
hot.redo();
expect(plugin.mergedCellsCollection.mergedCells.length).toEqual(2);
});
});
});
| 1 | 14,823 | These test checks if the value of the merged cells is correct. Please revert the changes and set `autoWrapCol` and `autoWrapRow` to `false` to the Handsontable instance. This change applies to the entire mergeCells.e2e.js file. | handsontable-handsontable | js |
@@ -598,3 +598,14 @@ func encodeAccounts(s *changeset.ChangeSet) ([]byte, error) {
}
// ---- Copy-Paste of code to decode ChangeSets: End -----
+
+var clearHashedChangesets = Migration{
+ Name: "clear_hashed_changesets",
+ Up: func(db ethdb.Database, tmpdir string, progress []byte, OnLoadCommit etl.LoadCommitHandler) error {
+ if err := db.(ethdb.BucketsMigrator).ClearBuckets(dbutils.AccountChangeSetBucket, dbutils.StorageChangeSetBucket); err != nil {
+ return err
+ }
+
+ return OnLoadCommit(db, nil, true)
+ },
+}
| 1 | package migrations
import (
"bytes"
"encoding/binary"
"fmt"
"sort"
"time"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/changeset"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/common/etl"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/log"
)
var accChangeSetDupSort = Migration{
Name: "acc_change_set_dup_sort_18",
Up: func(db ethdb.Database, tmpdir string, progress []byte, CommitProgress etl.LoadCommitHandler) (err error) {
logEvery := time.NewTicker(30 * time.Second)
defer logEvery.Stop()
logPrefix := "data migration: change_set_dup_sort"
const loadStep = "load"
changeSetBucket := dbutils.PlainAccountChangeSetBucket
cmp := db.(ethdb.HasTx).Tx().Comparator(dbutils.PlainStorageChangeSetBucket)
buf := etl.NewSortableBuffer(etl.BufferOptimalSize)
buf.SetComparator(cmp)
collectorR, err1 := etl.NewCollectorFromFiles(tmpdir)
if err1 != nil {
return err1
}
switch string(progress) {
case "":
// can't use files if progress field not set, clear them
if collectorR != nil {
collectorR.Close(logPrefix)
collectorR = nil
}
case loadStep:
if collectorR == nil {
return ErrMigrationETLFilesDeleted
}
defer func() {
// don't clean if error or panic happened
if err != nil {
return
}
if rec := recover(); rec != nil {
panic(rec)
}
collectorR.Close(logPrefix)
}()
goto LoadStep
}
collectorR = etl.NewCriticalCollector(tmpdir, buf)
defer func() {
// don't clean if error or panic happened
if err != nil {
return
}
if rec := recover(); rec != nil {
panic(rec)
}
collectorR.Close(logPrefix)
}()
if err = db.Walk(changeSetBucket, nil, 0, func(kk, changesetBytes []byte) (bool, error) {
blockNum, _ := dbutils.DecodeTimestamp(kk)
select {
default:
case <-logEvery.C:
log.Info(fmt.Sprintf("[%s] Progress", logPrefix), "blockNum", blockNum)
}
if err = accountChangeSetPlainBytesOld(changesetBytes).Walk(func(k, v []byte) error {
newK := make([]byte, 8)
binary.BigEndian.PutUint64(newK, blockNum)
newV := make([]byte, len(k)+len(v))
copy(newV, k)
copy(newV[len(k):], v)
return collectorR.Collect(newK, newV)
}); err != nil {
return false, err
}
return true, nil
}); err != nil {
return err
}
if err = db.(ethdb.BucketsMigrator).ClearBuckets(dbutils.PlainAccountChangeSetBucket); err != nil {
return fmt.Errorf("clearing the account changeset bucket: %w", err)
}
// Commit clearing of the bucket - freelist should now be written to the database
if err = CommitProgress(db, []byte(loadStep), false); err != nil {
return fmt.Errorf("committing the clearing of the account changeset bucket: %w", err)
}
LoadStep:
// Commit again
if err = CommitProgress(db, []byte(loadStep), false); err != nil {
return fmt.Errorf("committing the clearing of the account changeset bucket: %w", err)
}
// Now transaction would have been re-opened, and we should be re-using the space
if err = collectorR.Load(logPrefix, db, dbutils.PlainAccountChangeSetBucket, etl.IdentityLoadFunc, etl.TransformArgs{
OnLoadCommit: CommitProgress,
}); err != nil {
return fmt.Errorf("loading the transformed data back into the account changeset bucket: %w", err)
}
return nil
},
}
var storageChangeSetDupSort = Migration{
Name: "storage_change_set_dup_sort_22",
Up: func(db ethdb.Database, tmpdir string, progress []byte, CommitProgress etl.LoadCommitHandler) (err error) {
logEvery := time.NewTicker(30 * time.Second)
defer logEvery.Stop()
logPrefix := "data migration: storage_change_set_dup_sort"
const loadStep = "load"
changeSetBucket := dbutils.PlainStorageChangeSetBucket
cmp := db.(ethdb.HasTx).Tx().Comparator(dbutils.PlainStorageChangeSetBucket)
buf := etl.NewSortableBuffer(etl.BufferOptimalSize)
buf.SetComparator(cmp)
collectorR, err1 := etl.NewCollectorFromFiles(tmpdir)
if err1 != nil {
return err1
}
switch string(progress) {
case "":
// can't use files if progress field not set, clear them
if collectorR != nil {
collectorR.Close(logPrefix)
collectorR = nil
}
case loadStep:
if collectorR == nil {
return ErrMigrationETLFilesDeleted
}
defer func() {
// don't clean if error or panic happened
if err != nil {
return
}
if rec := recover(); rec != nil {
panic(rec)
}
collectorR.Close(logPrefix)
}()
goto LoadStep
}
collectorR = etl.NewCriticalCollector(tmpdir, buf)
defer func() {
// don't clean if error or panic happened
if err != nil {
return
}
if rec := recover(); rec != nil {
panic(rec)
}
collectorR.Close(logPrefix)
}()
if err = db.Walk(changeSetBucket, nil, 0, func(kk, changesetBytes []byte) (bool, error) {
blockNum, _ := dbutils.DecodeTimestamp(kk)
select {
default:
case <-logEvery.C:
log.Info(fmt.Sprintf("[%s] Progress", logPrefix), "blockNum", blockNum)
}
if err = storageChangeSetPlainBytesOld(changesetBytes).Walk(func(k, v []byte) error {
newK := make([]byte, 8+20+8)
binary.BigEndian.PutUint64(newK, blockNum)
copy(newK[8:], k[:20+8])
newV := make([]byte, 32+len(v))
copy(newV, k[20+8:])
copy(newV[32:], v)
return collectorR.Collect(newK, newV)
}); err != nil {
return false, err
}
return true, nil
}); err != nil {
return err
}
if err = db.(ethdb.BucketsMigrator).ClearBuckets(dbutils.PlainStorageChangeSetBucket); err != nil {
return fmt.Errorf("clearing the storage changeset bucket: %w", err)
}
// Commit clearing of the bucket - freelist should now be written to the database
if err = CommitProgress(db, []byte(loadStep), false); err != nil {
return fmt.Errorf("committing the clearing of the storage changeset bucket: %w", err)
}
LoadStep:
// Commit again
if err = CommitProgress(db, []byte(loadStep), false); err != nil {
return fmt.Errorf("committing the clearing of the storage changeset bucket: %w", err)
}
// Now transaction would have been re-opened, and we should be re-using the space
if err = collectorR.Load(logPrefix, db, dbutils.PlainStorageChangeSetBucket, etl.IdentityLoadFunc, etl.TransformArgs{
OnLoadCommit: CommitProgress,
Comparator: cmp,
}); err != nil {
return fmt.Errorf("loading the transformed data back into the storage changeset bucket: %w", err)
}
return nil
},
}
// ---- Copy-Paste of code to decode ChangeSets: Begin -----
type accountChangeSetPlainBytesOld []byte
func (b accountChangeSetPlainBytesOld) Walk(f func(k, v []byte) error) error {
return walkAccountChangeSet(b, common.AddressLength, f)
}
// walkAccountChangeSet iterates the account bytes with the keys of provided size
func walkAccountChangeSet(b []byte, keyLen uint32, f func(k, v []byte) error) error {
if len(b) == 0 {
return nil
}
if len(b) < 4 {
return fmt.Errorf("decode: input too short (%d bytes)", len(b))
}
n := binary.BigEndian.Uint32(b[0:4])
if n == 0 {
return nil
}
valOffset := 4 + n*keyLen + 4*n
if uint32(len(b)) < valOffset {
return fmt.Errorf("decode: input too short (%d bytes, expected at least %d bytes)", len(b), valOffset)
}
totalValLength := binary.BigEndian.Uint32(b[valOffset-4 : valOffset])
if uint32(len(b)) < valOffset+totalValLength {
return fmt.Errorf("decode: input too short (%d bytes, expected at least %d bytes)", len(b), valOffset+totalValLength)
}
for i := uint32(0); i < n; i++ {
key := b[4+i*keyLen : 4+(i+1)*keyLen]
idx0 := uint32(0)
if i > 0 {
idx0 = binary.BigEndian.Uint32(b[4+n*keyLen+4*(i-1) : 4+n*keyLen+4*i])
}
idx1 := binary.BigEndian.Uint32(b[4+n*keyLen+4*i : 4+n*keyLen+4*(i+1)])
val := b[valOffset+idx0 : valOffset+idx1]
err := f(key, val)
if err != nil {
return err
}
}
return nil
}
type storageChangeSetPlainBytesOld []byte
func (b storageChangeSetPlainBytesOld) Walk(f func(k, v []byte) error) error {
return walkStorageChangeSet(b, common.AddressLength, f)
}
func walkStorageChangeSet(b []byte, keyPrefixLen int, f func(k, v []byte) error) error {
if len(b) == 0 {
return nil
}
if len(b) < 4 {
return fmt.Errorf("decode: input too short (%d bytes)", len(b))
}
numOfUniqueElements := int(binary.BigEndian.Uint32(b))
if numOfUniqueElements == 0 {
return nil
}
incarnationsInfo := 4 + numOfUniqueElements*(keyPrefixLen+4)
numOfNotDefaultIncarnations := int(binary.BigEndian.Uint32(b[incarnationsInfo:]))
incarnationsStart := incarnationsInfo + 4
notDefaultIncarnations := make(map[uint32]uint64, numOfNotDefaultIncarnations)
if numOfNotDefaultIncarnations > 0 {
for i := 0; i < numOfNotDefaultIncarnations; i++ {
notDefaultIncarnations[binary.BigEndian.Uint32(b[incarnationsStart+i*12:])] = binary.BigEndian.Uint64(b[incarnationsStart+i*12+4:])
}
}
keysStart := incarnationsStart + numOfNotDefaultIncarnations*12
numOfElements := int(binary.BigEndian.Uint32(b[incarnationsInfo-4:]))
valsInfoStart := keysStart + numOfElements*common.HashLength
var addressHashID uint32
var id int
k := make([]byte, keyPrefixLen+common.HashLength+common.IncarnationLength)
for i := 0; i < numOfUniqueElements; i++ {
var (
startKeys int
endKeys int
)
if i > 0 {
startKeys = int(binary.BigEndian.Uint32(b[4+i*(keyPrefixLen)+(i-1)*4 : 4+i*(keyPrefixLen)+(i)*4]))
}
endKeys = int(binary.BigEndian.Uint32(b[4+(i+1)*(keyPrefixLen)+i*4:]))
addrBytes := b[4+i*(keyPrefixLen)+i*4:] // hash or raw address
incarnation := changeset.DefaultIncarnation
if inc, ok := notDefaultIncarnations[addressHashID]; ok {
incarnation = inc
}
for j := startKeys; j < endKeys; j++ {
copy(k[:keyPrefixLen], addrBytes[:keyPrefixLen])
binary.BigEndian.PutUint64(k[keyPrefixLen:], incarnation)
copy(k[keyPrefixLen+common.IncarnationLength:keyPrefixLen+common.HashLength+common.IncarnationLength], b[keysStart+j*common.HashLength:])
val, innerErr := findValue(b[valsInfoStart:], id)
if innerErr != nil {
return innerErr
}
err := f(k, val)
if err != nil {
return err
}
id++
}
addressHashID++
}
return nil
}
func findValue(b []byte, i int) ([]byte, error) {
numOfUint8 := int(binary.BigEndian.Uint32(b[0:]))
numOfUint16 := int(binary.BigEndian.Uint32(b[4:]))
numOfUint32 := int(binary.BigEndian.Uint32(b[8:]))
//after num of values
lenOfValsStartPointer := 12
valsPointer := lenOfValsStartPointer + numOfUint8 + numOfUint16*2 + numOfUint32*4
var (
lenOfValStart int
lenOfValEnd int
)
switch {
case i < numOfUint8:
lenOfValEnd = int(b[lenOfValsStartPointer+i])
if i > 0 {
lenOfValStart = int(b[lenOfValsStartPointer+i-1])
}
case i < numOfUint8+numOfUint16:
one := (i-numOfUint8)*2 + numOfUint8
lenOfValEnd = int(binary.BigEndian.Uint16(b[lenOfValsStartPointer+one : lenOfValsStartPointer+one+2]))
if i-1 < numOfUint8 {
lenOfValStart = int(b[lenOfValsStartPointer+i-1])
} else {
one = (i-1)*2 - numOfUint8
lenOfValStart = int(binary.BigEndian.Uint16(b[lenOfValsStartPointer+one : lenOfValsStartPointer+one+2]))
}
case i < numOfUint8+numOfUint16+numOfUint32:
one := lenOfValsStartPointer + numOfUint8 + numOfUint16*2 + (i-numOfUint8-numOfUint16)*4
lenOfValEnd = int(binary.BigEndian.Uint32(b[one : one+4]))
if i-1 < numOfUint8+numOfUint16 {
one = lenOfValsStartPointer + (i-1)*2 - numOfUint8
lenOfValStart = int(binary.BigEndian.Uint16(b[one : one+2]))
} else {
one = lenOfValsStartPointer + numOfUint8 + numOfUint16*2 + (i-1-numOfUint8-numOfUint16)*4
lenOfValStart = int(binary.BigEndian.Uint32(b[one : one+4]))
}
default:
return nil, changeset.ErrFindValue
}
return b[valsPointer+lenOfValStart : valsPointer+lenOfValEnd], nil
}
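// Worked example for the length table consumed above (hypothetical sizes,
// not from the original source): two values of 100 and 200 bytes have the
// accumulated ends 100 and 300, so numOfUint8=1, numOfUint16=1, numOfUint32=0
// and the table is laid out as
//
//	[00 00 00 01][00 00 00 01][00 00 00 00] [64] [01 2C] <values>
//
// findValue(b, 1) then reads end=300 from the uint16 slot and start=100 from
// the uint8 slot, returning the 200-byte slice b[115:315] (the value area
// starts at offset 15 = 12 + 1 + 2).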
//nolint:unused,deadcode
type contractKeys struct {
AddrBytes []byte // either a hash of address or raw address
Incarnation uint64
Keys [][]byte
Vals [][]byte
}
//nolint:unused,deadcode
func encodeStorage(s *changeset.ChangeSet, keyPrefixLen uint32) ([]byte, error) {
sort.Sort(s)
var err error
buf := new(bytes.Buffer)
uint16Arr := make([]byte, 2)
uint32Arr := make([]byte, 4)
numOfElements := s.Len()
keys := make([]contractKeys, 0, numOfElements)
valLengths := make([]byte, 0, numOfElements)
var (
currentContract contractKeys
numOfUint8 uint32
numOfUint16 uint32
numOfUint32 uint32
lengthOfValues uint32
)
var nonDefaultIncarnationCounter uint32
//first 4 bytes - len. body - []{idOfAddrHash(4) + incarnation(8)}
notDefaultIncarnationsBytes := make([]byte, 4)
b := make([]byte, 12)
currentKey := -1
for i, change := range s.Changes {
addrBytes := change.Key[0:keyPrefixLen] // hash or raw address
incarnation := binary.BigEndian.Uint64(change.Key[keyPrefixLen:])
keyBytes := change.Key[keyPrefixLen+common.IncarnationLength : keyPrefixLen+common.HashLength+common.IncarnationLength] // hash or raw key
//found new contract address
if i == 0 || !bytes.Equal(currentContract.AddrBytes, addrBytes) || currentContract.Incarnation != incarnation {
currentKey++
currentContract.AddrBytes = addrBytes
currentContract.Incarnation = incarnation
//add to incarnations part only if it's not default
if incarnation != changeset.DefaultIncarnation {
binary.BigEndian.PutUint32(b[0:], uint32(currentKey))
binary.BigEndian.PutUint64(b[4:], incarnation)
notDefaultIncarnationsBytes = append(notDefaultIncarnationsBytes, b...)
nonDefaultIncarnationCounter++
}
currentContract.Keys = [][]byte{keyBytes}
currentContract.Vals = [][]byte{change.Value}
keys = append(keys, currentContract)
} else {
//add key and value
currentContract.Keys = append(currentContract.Keys, keyBytes)
currentContract.Vals = append(currentContract.Vals, change.Value)
}
//calculate lengths of values
lengthOfValues += uint32(len(change.Value))
switch {
case lengthOfValues <= 255:
valLengths = append(valLengths, uint8(lengthOfValues))
numOfUint8++
case lengthOfValues <= 65535:
binary.BigEndian.PutUint16(uint16Arr, uint16(lengthOfValues))
valLengths = append(valLengths, uint16Arr...)
numOfUint16++
default:
binary.BigEndian.PutUint32(uint32Arr, lengthOfValues)
valLengths = append(valLengths, uint32Arr...)
numOfUint32++
}
//save to array
keys[currentKey] = currentContract
}
// save numOfUniqueContracts
binary.BigEndian.PutUint32(uint32Arr, uint32(len(keys)))
if _, err = buf.Write(uint32Arr); err != nil {
return nil, err
}
if len(keys) == 0 {
return nil, fmt.Errorf("empty changeset: nothing to encode")
}
// save addrHashes + endOfKeys
var endNumOfKeys int
for i := 0; i < len(keys); i++ {
if _, err = buf.Write(keys[i].AddrBytes); err != nil {
return nil, err
}
endNumOfKeys += len(keys[i].Keys)
//end of keys
binary.BigEndian.PutUint32(uint32Arr, uint32(endNumOfKeys))
if _, err = buf.Write(uint32Arr); err != nil {
return nil, err
}
}
if endNumOfKeys != numOfElements {
return nil, fmt.Errorf("incorrect number of elements must:%v current:%v", numOfElements, endNumOfKeys)
}
// save not default incarnations
binary.BigEndian.PutUint32(notDefaultIncarnationsBytes, nonDefaultIncarnationCounter)
if _, err = buf.Write(notDefaultIncarnationsBytes); err != nil {
return nil, err
}
// save keys
for _, group := range keys {
for _, v := range group.Keys {
if _, err = buf.Write(v); err != nil {
return nil, err
}
}
}
// save lengths of values
binary.BigEndian.PutUint32(uint32Arr, numOfUint8)
if _, err = buf.Write(uint32Arr); err != nil {
return nil, err
}
binary.BigEndian.PutUint32(uint32Arr, numOfUint16)
if _, err = buf.Write(uint32Arr); err != nil {
return nil, err
}
binary.BigEndian.PutUint32(uint32Arr, numOfUint32)
if _, err = buf.Write(uint32Arr); err != nil {
return nil, err
}
if _, err = buf.Write(valLengths); err != nil {
return nil, err
}
// save values
for _, v := range keys {
for _, val := range v.Vals {
if _, err = buf.Write(val); err != nil {
return nil, err
}
}
}
return buf.Bytes(), nil
}
/*
AccountChangeSet is serialized in the following manner in order to facilitate binary search:
1. The number of keys N (uint32, 4 bytes).
2. Contiguous array of keys (N*M bytes).
3. Contiguous array of accumulating value indexes:
len(val0), len(val0)+len(val1), ..., len(val0)+len(val1)+...+len(val_{N-1})
(4*N bytes since the lengths are treated as uint32).
4. Contiguous array of values.
uint32 integers are serialized as big-endian.
*/
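// As a worked illustration (example values, not from the original source):
// a changeset with two 20-byte keys k0 and k1, whose values are 2 and 3
// bytes long, serializes to
//
//	[00 00 00 02] k0 k1 [00 00 00 02] [00 00 00 05] v0 v1
//
// i.e. N=2, both keys, the accumulated value ends 2 and 2+3=5, and finally
// the concatenated values. The fixed-width key block is what makes the
// binary search mentioned above possible.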
//nolint:unused,deadcode
func encodeAccounts(s *changeset.ChangeSet) ([]byte, error) {
sort.Sort(s)
buf := new(bytes.Buffer)
intArr := make([]byte, 4)
n := s.Len()
binary.BigEndian.PutUint32(intArr, uint32(n))
_, err := buf.Write(intArr)
if err != nil {
return nil, err
}
for i := 0; i < n; i++ {
_, err = buf.Write(s.Changes[i].Key)
if err != nil {
return nil, err
}
}
var l int
for i := 0; i < n; i++ {
l += len(s.Changes[i].Value)
binary.BigEndian.PutUint32(intArr, uint32(l))
_, err = buf.Write(intArr)
if err != nil {
return nil, err
}
}
for i := 0; i < n; i++ {
_, err = buf.Write(s.Changes[i].Value)
if err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
// ---- Copy-Paste of code to decode ChangeSets: End -----
| 1 | 21,991 | Don't need to clear them - because nobody have data there, also can don't delete buckets - just leave them, new nodes will not have them if remove bucket from buckets.go | ledgerwatch-erigon | go |
@@ -0,0 +1,17 @@
+/* Copyright 2016 Google Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.api.codegen.viewmodel;
+
+public interface ApiMethodView {}
| 1 |
|
@@ -22,7 +22,7 @@ module Selenium
class Common
MAX_REDIRECTS = 20 # same as chromium/gecko
CONTENT_TYPE = 'application/json'.freeze
- DEFAULT_HEADERS = {'Accept' => CONTENT_TYPE}.freeze
+ DEFAULT_HEADERS = {'Accept' => CONTENT_TYPE, 'Content-Type' => 'application/x-www-form-urlencoded'}.freeze
attr_accessor :timeout
attr_writer :server_url
| 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Remote
module Http
class Common
MAX_REDIRECTS = 20 # same as chromium/gecko
CONTENT_TYPE = 'application/json'.freeze
DEFAULT_HEADERS = {'Accept' => CONTENT_TYPE}.freeze
attr_accessor :timeout
attr_writer :server_url
def initialize
@timeout = nil
end
def quit_errors
[IOError]
end
def close
# hook for subclasses - will be called on Driver#quit
end
def call(verb, url, command_hash)
url = server_url.merge(url) unless url.is_a?(URI)
headers = DEFAULT_HEADERS.dup
headers['Cache-Control'] = 'no-cache' if verb == :get
if command_hash
payload = JSON.generate(command_hash)
headers['Content-Type'] = "#{CONTENT_TYPE}; charset=utf-8"
headers['Content-Length'] = payload.bytesize.to_s if [:post, :put].include?(verb)
WebDriver.logger.info(" >>> #{url} | #{payload}")
WebDriver.logger.debug(" > #{headers.inspect}")
elsif verb == :post
payload = '{}'
headers['Content-Length'] = '2'
end
request verb, url, headers, payload
end
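# A minimal usage sketch (illustrative only, not part of the original file),
# assuming the Http::Default subclass that implements #request:
#
#   http = Selenium::WebDriver::Remote::Http::Default.new
#   http.server_url = URI.parse('http://localhost:4444/wd/hub/')
#   http.call(:post, 'session/abc123/url', url: 'https://example.com')
#
# The command hash is serialized to JSON, and the Content-Type and
# Content-Length headers are filled in before the request is handed
# to #request.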
private
def server_url
return @server_url if @server_url
raise Error::WebDriverError, 'server_url not set'
end
def request(*)
raise NotImplementedError, 'subclass responsibility'
end
def create_response(code, body, content_type)
code = code.to_i
body = body.to_s.strip
content_type = content_type.to_s
WebDriver.logger.info("<- #{body}")
if content_type.include? CONTENT_TYPE
raise Error::WebDriverError, "empty body: #{content_type.inspect} (#{code})\n#{body}" if body.empty?
Response.new(code, JSON.parse(body))
elsif code == 204
Response.new(code)
else
msg = "unexpected response, code=#{code}, content-type=#{content_type.inspect}"
msg << "\n#{body}" unless body.empty?
raise Error::WebDriverError, msg
end
end
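# Sketch of the decision table implemented above (illustrative values only):
#
#   create_response(200, '{"value":null}', 'application/json') # => Response with parsed JSON
#   create_response(204, '', '')                               # => empty Response
#   create_response(500, 'oops', 'text/plain')                 # => raises Error::WebDriverError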
end # Common
end # Http
end # Remote
end # WebDriver
end # Selenium
| 1 | 15,359 | Does it send requests with urlencoded bodies anywhere? I thought it sends only json. Maybe content-type should be `application/json` by default? | SeleniumHQ-selenium | rb |
@@ -268,7 +268,7 @@ class StubsGenerator
/**
* @return PhpParser\Node\Identifier|PhpParser\Node\Name|PhpParser\Node\NullableType|null
*/
- public static function getParserTypeFromPsalmType(Type\Union $type)
+ public static function getParserTypeFromPsalmType(Type\Union $type): ?PhpParser\NodeAbstract
{
$nullable = $type->isNullable();
| 1 | <?php
namespace Psalm\Internal\Stubs\Generator;
use PhpParser;
use Psalm\Internal\Scanner\ParsedDocblock;
use Psalm\Type;
class StubsGenerator
{
public static function getAll(
\Psalm\Codebase $codebase,
\Psalm\Internal\Provider\ClassLikeStorageProvider $class_provider,
\Psalm\Internal\Provider\FileStorageProvider $file_provider
) : string {
$namespaced_nodes = [];
$psalm_base = dirname(__DIR__, 5);
foreach ($class_provider->getAll() as $storage) {
if (\strpos($storage->name, 'Psalm\\') === 0) {
continue;
}
if ($storage->location
&& strpos($storage->location->file_path, $psalm_base) === 0
) {
continue;
}
if ($storage->stubbed) {
continue;
}
$name_parts = explode('\\', $storage->name);
$classlike_name = array_pop($name_parts);
$namespace_name = implode('\\', $name_parts);
if (!isset($namespaced_nodes[$namespace_name])) {
$namespaced_nodes[$namespace_name] = [];
}
$namespaced_nodes[$namespace_name][$classlike_name] = ClassLikeStubGenerator::getClassLikeNode(
$codebase,
$storage,
$classlike_name
);
}
$all_function_names = [];
foreach ($codebase->functions->getAllStubbedFunctions() as $function_storage) {
if ($function_storage->location
&& \strpos($function_storage->location->file_path, $psalm_base) === 0
) {
continue;
}
if (!$function_storage->cased_name) {
throw new \UnexpectedValueException('expected the function storage to have a cased name');
}
$fq_name = $function_storage->cased_name;
$all_function_names[$fq_name] = true;
$name_parts = explode('\\', $fq_name);
$function_name = array_pop($name_parts);
$namespace_name = implode('\\', $name_parts);
$namespaced_nodes[$namespace_name][$fq_name] = self::getFunctionNode(
$function_storage,
$function_name,
$namespace_name
);
}
foreach ($codebase->getAllStubbedConstants() as $fq_name => $type) {
if ($type->isMixed()) {
continue;
}
$name_parts = explode('\\', $fq_name);
$constant_name = array_pop($name_parts);
$namespace_name = implode('\\', $name_parts);
$namespaced_nodes[$namespace_name][$fq_name] = new PhpParser\Node\Stmt\Const_(
[
new PhpParser\Node\Const_(
$constant_name,
self::getExpressionFromType($type)
)
]
);
}
foreach ($file_provider->getAll() as $file_storage) {
if (\strpos($file_storage->file_path, $psalm_base) === 0) {
continue;
}
foreach ($file_storage->functions as $function_storage) {
if (!$function_storage->cased_name) {
continue;
}
$fq_name = $function_storage->cased_name;
if (isset($all_function_names[$fq_name])) {
continue;
}
$all_function_names[$fq_name] = true;
$name_parts = explode('\\', $fq_name);
$function_name = array_pop($name_parts);
$namespace_name = implode('\\', $name_parts);
$namespaced_nodes[$namespace_name][$fq_name] = self::getFunctionNode(
$function_storage,
$function_name,
$namespace_name
);
}
foreach ($file_storage->constants as $fq_name => $type) {
if ($type->isMixed()) {
continue;
}
$name_parts = explode('\\', $fq_name);
$constant_name = array_pop($name_parts);
$namespace_name = implode('\\', $name_parts);
$namespaced_nodes[$namespace_name][$fq_name] = new PhpParser\Node\Stmt\Const_(
[
new PhpParser\Node\Const_(
$constant_name,
self::getExpressionFromType($type)
)
]
);
}
}
ksort($namespaced_nodes);
$namespace_stmts = [];
foreach ($namespaced_nodes as $namespace_name => $stmts) {
ksort($stmts);
$namespace_stmts[] = new PhpParser\Node\Stmt\Namespace_(
$namespace_name ? new PhpParser\Node\Name($namespace_name) : null,
array_values($stmts),
['kind' => PhpParser\Node\Stmt\Namespace_::KIND_BRACED]
);
}
$prettyPrinter = new PhpParser\PrettyPrinter\Standard;
return $prettyPrinter->prettyPrintFile($namespace_stmts);
}
private static function getFunctionNode(
\Psalm\Storage\FunctionLikeStorage $function_storage,
string $function_name,
string $namespace_name
) : PhpParser\Node\Stmt\Function_ {
$docblock = new ParsedDocblock('', []);
foreach ($function_storage->template_types ?: [] as $template_name => $map) {
$type = array_values($map)[0][0];
$docblock->tags['template'][] = $template_name . ' as ' . $type->toNamespacedString(
$namespace_name,
[],
null,
false
);
}
foreach ($function_storage->params as $param) {
if ($param->type && $param->type !== $param->signature_type) {
$docblock->tags['param'][] = $param->type->toNamespacedString(
$namespace_name,
[],
null,
false
) . ' $' . $param->name;
}
}
if ($function_storage->return_type
&& $function_storage->signature_return_type !== $function_storage->return_type
) {
$docblock->tags['return'][] = $function_storage->return_type->toNamespacedString(
$namespace_name,
[],
null,
false
);
}
foreach ($function_storage->throws ?: [] as $exception_name => $_) {
$docblock->tags['throws'][] = Type::getStringFromFQCLN(
$exception_name,
$namespace_name,
[],
null,
false
);
}
return new PhpParser\Node\Stmt\Function_(
$function_name,
[
'params' => self::getFunctionParamNodes($function_storage),
'returnType' => $function_storage->signature_return_type
? self::getParserTypeFromPsalmType($function_storage->signature_return_type)
: null,
'stmts' => [],
],
[
'comments' => $docblock->tags
? [
new PhpParser\Comment\Doc(
\rtrim($docblock->render(' '))
)
]
: []
]
);
}
/**
* @return list<PhpParser\Node\Param>
*/
public static function getFunctionParamNodes(\Psalm\Storage\FunctionLikeStorage $method_storage): array
{
$param_nodes = [];
foreach ($method_storage->params as $param) {
$param_nodes[] = new PhpParser\Node\Param(
new PhpParser\Node\Expr\Variable($param->name),
$param->default_type
? self::getExpressionFromType($param->default_type)
: null,
$param->signature_type
? self::getParserTypeFromPsalmType($param->signature_type)
: null,
$param->by_ref,
$param->is_variadic
);
}
return $param_nodes;
}
/**
* @return PhpParser\Node\Identifier|PhpParser\Node\Name|PhpParser\Node\NullableType|null
*/
public static function getParserTypeFromPsalmType(Type\Union $type)
{
$nullable = $type->isNullable();
foreach ($type->getAtomicTypes() as $atomic_type) {
if ($atomic_type instanceof Type\Atomic\TNull) {
continue;
}
if ($atomic_type instanceof Type\Atomic\Scalar
|| $atomic_type instanceof Type\Atomic\TObject
|| $atomic_type instanceof Type\Atomic\TArray
|| $atomic_type instanceof Type\Atomic\TIterable
) {
$identifier_string = $atomic_type->toPhpString(null, [], null, 8, 0);
if ($identifier_string === null) {
throw new \UnexpectedValueException(
$atomic_type->getId() . ' could not be converted to an identifier'
);
}
$identifier = new PhpParser\Node\Identifier($identifier_string);
if ($nullable) {
return new PhpParser\Node\NullableType($identifier);
}
return $identifier;
}
if ($atomic_type instanceof Type\Atomic\TNamedObject) {
$name_node = new PhpParser\Node\Name\FullyQualified($atomic_type->value);
if ($nullable) {
return new PhpParser\Node\NullableType($name_node);
}
return $name_node;
}
}
}
public static function getExpressionFromType(Type\Union $type) : PhpParser\Node\Expr
{
foreach ($type->getAtomicTypes() as $atomic_type) {
if ($atomic_type instanceof Type\Atomic\TLiteralString) {
return new PhpParser\Node\Scalar\String_($atomic_type->value);
}
if ($atomic_type instanceof Type\Atomic\TLiteralInt) {
return new PhpParser\Node\Scalar\LNumber($atomic_type->value);
}
if ($atomic_type instanceof Type\Atomic\TLiteralFloat) {
return new PhpParser\Node\Scalar\DNumber($atomic_type->value);
}
if ($atomic_type instanceof Type\Atomic\TFalse) {
return new PhpParser\Node\Expr\ConstFetch(new PhpParser\Node\Name('false'));
}
if ($atomic_type instanceof Type\Atomic\TTrue) {
return new PhpParser\Node\Expr\ConstFetch(new PhpParser\Node\Name('true'));
}
if ($atomic_type instanceof Type\Atomic\TNull) {
return new PhpParser\Node\Expr\ConstFetch(new PhpParser\Node\Name('null'));
}
if ($atomic_type instanceof Type\Atomic\TArray) {
return new PhpParser\Node\Expr\Array_([]);
}
}
return new PhpParser\Node\Scalar\String_('Psalm could not infer this type');
}
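// Illustrative sketch (not part of the original class): a literal int union
// maps to an LNumber node; Type::getInt() with a literal value is assumed
// to be available for constructing such a union:
//
//   $expr = StubsGenerator::getExpressionFromType(Type::getInt(false, 5));
//   // $expr instanceof PhpParser\Node\Scalar\LNumber, with value 5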
}
| 1 | 9,160 | I must have forgotten that one in previous PR | vimeo-psalm | php |
@@ -1,10 +1,8 @@
require "test_helper"
class DiaryCommentTest < ActiveSupport::TestCase
- api_fixtures
- fixtures :diary_comments
-
def test_diary_comment_count
- assert_equal 4, DiaryComment.count
+ comment = create(:diary_comment)
+ assert_includes DiaryComment.all, comment
end
end
| 1 | require "test_helper"
class DiaryCommentTest < ActiveSupport::TestCase
api_fixtures
fixtures :diary_comments
def test_diary_comment_count
assert_equal 4, DiaryComment.count
end
end
| 1 | 10,079 | This test name doesn't really reflect what the test does any more... Then again I'm not really sure what it is testing now - is it actually just testing that FactoryGirl can create records? or does that itself funnel through the rails code so that we're testing rails can create records? | openstreetmap-openstreetmap-website | rb |
@@ -1423,8 +1423,12 @@ func (m *executor) putCStorVolumeReplica() (err error) {
// putUpgradeResult will put an upgrade result as defined in the task
func (m *executor) putUpgradeResult() (err error) {
+ raw, err := template.AsTemplatedBytes("UpgradeResult", m.Runtask.Spec.Task, m.Values)
+ if err != nil {
+ return
+ }
uresult, err := upgraderesult.
- BuilderForRuntask("UpgradeResult", m.Runtask.Spec.Task, m.Values).
+ BuilderForTemplateObject(raw).
Build()
if err != nil {
return
| 1 | /*
Copyright 2017 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package task
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
stringer "github.com/openebs/maya/pkg/apis/stringer/v1alpha1"
m_k8s_client "github.com/openebs/maya/pkg/client/k8s"
cstorpool "github.com/openebs/maya/pkg/cstorpool/v1alpha2"
cstorvolume "github.com/openebs/maya/pkg/cstorvolume/v1alpha1"
errors "github.com/openebs/maya/pkg/errors/v1alpha1"
m_k8s "github.com/openebs/maya/pkg/k8s"
deploy_appsv1 "github.com/openebs/maya/pkg/kubernetes/deployment/appsv1/v1alpha1"
deploy_extnv1beta1 "github.com/openebs/maya/pkg/kubernetes/deployment/extnv1beta1/v1alpha1"
patch "github.com/openebs/maya/pkg/kubernetes/patch/v1alpha1"
pod "github.com/openebs/maya/pkg/kubernetes/pod/v1alpha1"
podexec "github.com/openebs/maya/pkg/kubernetes/podexec/v1alpha1"
replicaset "github.com/openebs/maya/pkg/kubernetes/replicaset/v1alpha1"
service "github.com/openebs/maya/pkg/kubernetes/service/v1alpha1"
storagepool "github.com/openebs/maya/pkg/storagepool/v1alpha1"
"github.com/openebs/maya/pkg/template"
templatefuncs "github.com/openebs/maya/pkg/templatefuncs/v1alpha1"
upgraderesult "github.com/openebs/maya/pkg/upgrade/result/v1alpha1"
"github.com/openebs/maya/pkg/util"
api_apps_v1 "k8s.io/api/apps/v1"
api_apps_v1beta1 "k8s.io/api/apps/v1beta1"
api_batch_v1 "k8s.io/api/batch/v1"
api_core_v1 "k8s.io/api/core/v1"
api_extn_v1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
// ErrorUnSupportedTask is used to throw error
// for the tasks which are not supported by
// the executor instance(s)
ErrorUnSupportedTask error = errors.New("task not supported")
)
// Executor provides the contract to execute
// RunTasks
type Executor interface {
Execute() (err error)
}
// OutputExecutor provides the contract to
// generate content from a RunTask's
// specifications
//
// NOTE:
// The output format is specified in the
// RunTask itself
type OutputExecutor interface {
Output() (output []byte, err error)
}
type executor struct {
// Values is applied against the
// task's specification (~ a go template)
Values map[string]interface{}
// MetaExec is used to execute meta
// operations on this task
MetaExec *MetaExecutor
// Runtask defines a task & operations
// associated with it
Runtask *v1alpha1.RunTask
}
// newExecutor returns a new instance of
// executor
func newExecutor(rt *v1alpha1.RunTask, values map[string]interface{}) (*executor, error) {
mte, err := NewMetaExecutor(rt.Spec.Meta, values)
if err != nil {
return nil,
errors.Wrapf(err, "failed to init task executor: failed to init meta executor: %s %s", rt, stringer.Yaml("template values", values))
}
return &executor{
Values: values,
MetaExec: mte,
Runtask: rt,
}, nil
}
// String is the Stringer implementation
// of executor
func (m *executor) String() string {
return stringer.Yaml("task executor", m)
}
// GoString is the GoStringer implementation
// of executor
func (m *executor) GoString() string {
return stringer.Yaml("task executor", m)
}
// getTaskIdentity gets the task identity
func (m *executor) getTaskIdentity() string {
return m.MetaExec.getIdentity()
}
// getTaskObjectName gets the task's object name
func (m *executor) getTaskObjectName() string {
return m.MetaExec.getObjectName()
}
// getTaskRunNamespace gets the namespace where
// RunTask should get executed
func (m *executor) getTaskRunNamespace() string {
return m.MetaExec.getRunNamespace()
}
// getK8sClient gets the kubernetes client to execute this task
func (m *executor) getK8sClient() *m_k8s_client.K8sClient {
return m.MetaExec.getK8sClient()
}
// Output returns the result of templating a
// RunTask meant for templating only purpose
//
// NOTE:
// This implements OutputExecutor interface
func (m *executor) Output() ([]byte, error) {
output, err := template.AsTemplatedBytes(
"output",
m.Runtask.Spec.Task,
m.Values,
)
if err != nil {
return nil, errors.Wrapf(errors.WithStack(err), "failed to generate output: %s", m)
}
return output, nil
}
// getNotFoundError fetches NotFound error if any; post
// the execution of this runtask. This is extracted from
// the updated template values
//
// NOTE:
// Logic to determine NotFound error is specified at
// Post property which is executed after the task's
// execution.
//
// NOTE:
// In case of NotFound error, template values
// is set with NotFound error against below
// nested key
//
// .TaskResult.<taskID>.notFoundErr
func (m *executor) getNotFoundError() interface{} {
return util.GetNestedField(
m.Values,
string(v1alpha1.TaskResultTLP),
m.getTaskIdentity(),
string(v1alpha1.TaskResultNotFoundErrTRTP),
)
}
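// For illustration (hypothetical task identity, not from the source): for a
// task whose identity is "readsvc", this reads the nested template value at
//
//	Values["TaskResult"]["readsvc"]["notFoundErr"]
//
// (modulo the map[string]interface{} assertions GetNestedField performs at
// each level of the path).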
// getTaskResultVersionMismatchError fetches VersionMismatch
// error if any; post the execution of this runtask
//
// NOTE:
// Logic to determine VersionMismatch error is specified at
// Post property which is executed after the task's execution.
//
// NOTE:
// In case of VersionMismatch error, template values
// is set with VersionMismatch error against below nested
// key
//
// .TaskResult.<taskID>.versionMismatchErr
func (m *executor) getTaskResultVersionMismatchError() interface{} {
return util.GetNestedField(
m.Values,
string(v1alpha1.TaskResultTLP),
m.getTaskIdentity(),
string(v1alpha1.TaskResultVersionMismatchErrTRTP),
)
}
// getVerifyError fetches the verification error if any;
// post the execution of this runtask
//
// NOTE:
// Logic to determine Verify error is specified at Post
// property which is executed after the task's execution.
//
// NOTE:
// In case of Verify error, template values is
// set with Verify error against below nested key
//
// .TaskResult.<taskID>.verifyErr
func (m *executor) getVerifyError() interface{} {
return util.GetNestedField(
m.Values,
string(v1alpha1.TaskResultTLP),
m.getTaskIdentity(),
string(v1alpha1.TaskResultVerifyErrTRTP),
)
}
// resetTaskResultVerifyError resets verification error if any;
// post the execution of this runtask
//
// NOTE:
// If a runtask results in Verify error, its execution
// can be retried by reseting Verify error
//
// NOTE:
// Reset here implies setting the verification error's
// placeholder value to nil
//
// Below property is reset with 'nil':
// .TaskResult.<taskID>.verifyErr
func (m *executor) resetTaskResultVerifyError() {
util.SetNestedField(
m.Values,
nil,
string(v1alpha1.TaskResultTLP),
m.getTaskIdentity(),
string(v1alpha1.TaskResultVerifyErrTRTP),
)
}
// repeatWith repeats execution of the task based on
// repeatWith property of meta task specifications.
//
// NOTE:
// With this property RunTask can be executed repeatedly
// based on the resource names set against the repeatWith
// property.
//
// NOTE:
// Each task execution depends on the current resource
// index
func (m *executor) repeatWith() (err error) {
rptExec := m.MetaExec.getRepeatExecutor()
if !rptExec.isRepeat() {
// no need to repeat if this task
// is not meant to be repeated;
// so execute once & return
err = m.retryOnVerificationError()
return
}
// execute the task based on each repeat
repeats := rptExec.len()
var (
rptMetaExec *MetaExecutor
current string
)
for idx := 0; idx < repeats; idx++ {
// fetch a new repeat meta task instance
rptMetaExec, err = m.MetaExec.asRepeatInstance(idx)
if err != nil {
// stop repetition on unhandled runtime errors
// & return
return
}
// mutate the original meta task executor
// to this repeater instance
m.MetaExec = rptMetaExec
// set the currently active repeat item
current, err = m.MetaExec.repeater.getItem(idx)
if err != nil {
// stop repetition on unhandled runtime error
// & return
return
}
util.SetNestedField(
m.Values,
current,
string(v1alpha1.ListItemsTLP),
string(v1alpha1.CurrentRepeatResourceLITP),
)
// execute the task function finally
err = m.retryOnVerificationError()
if err != nil {
// stop repetition on unhandled runtime error
// & return
return
}
}
return
}
// retryOnVerificationError retries execution of the task
// if the task execution resulted into verification error.
// The number of retry attempts & interval between each
// attempt is specified in the task's meta specification.
func (m *executor) retryOnVerificationError() (err error) {
retryAttempts, interval := m.MetaExec.getRetry()
// original invocation as well as all retry attempts
// i == 0 implies original task execute invocation
// i > 0 implies a retry operation
for i := 0; i <= retryAttempts; i++ {
// first reset the previous verify error if any
m.resetTaskResultVerifyError()
// execute the task function
err = m.ExecuteIt()
if err != nil {
// break this retry execution loop
// if there were any runtime errors
return
}
// check for VerifyError if any
//
// NOTE:
// VerifyError is a handled runtime error
// which is set via templating
//
// NOTE:
// retry is done only if VerifyError is
// set during post task execution
verifyErr := m.getVerifyError()
if verifyErr == nil {
// no need to retry if task execution was a
// success i.e. there was no verification error
// found with the task result
return
}
// current verify error
err, _ = verifyErr.(*templatefuncs.VerifyError)
if i != retryAttempts {
glog.Warningf(
"verify error was found for runtask {%s}: error {%s}: will retry task execution-'%d'",
m.getTaskIdentity(),
err,
i+1,
)
// will retry after the specified interval
time.Sleep(interval)
}
}
// return after exhausting the original invocation
// and all retries; verification error of the final
// attempt will be returned here
return
}
// Execute executes a runtask by following the
// directives specified in the runtask's meta
// specifications
func (m *executor) Execute() (err error) {
if m.MetaExec.isDisabled() {
// do nothing if runtask is disabled
return
}
return m.repeatWith()
}
// postExecuteIt executes a go template against
// the provided template values. This is run
// after executing a task.
//
// NOTE:
// This go template is a set of template functions
// that queries specified properties from the result
// of task's execution & stores them at placeholders
// within the **template values**. These stored values
// can later be queried by subsequent runtasks.
func (m *executor) postExecuteIt() (err error) {
if m.Runtask == nil || len(m.Runtask.Spec.PostRun) == 0 {
// do nothing if post specs is empty
return
}
// post runtask operation
_, err = template.AsTemplatedBytes(
"PostRun",
m.Runtask.Spec.PostRun,
m.Values,
)
if err != nil {
// return any un-handled runtime error
return
}
// verMismatchErr is a handled runtime error i.e.
// is set in template values. This needs to be
// extracted and thrown as VersionMismatchError
verMismatchErr := m.getTaskResultVersionMismatchError()
if verMismatchErr != nil {
glog.Warningf(
"version mismatch error at runtask {%s}: error {%s}",
m.getTaskIdentity(),
verMismatchErr,
)
err, _ = verMismatchErr.(*templatefuncs.VersionMismatchError)
return
}
// notFoundErr is a handled runtime error i.e. it is
// set in template values. This needs to be
// extracted and thrown as NotFoundError
notFoundErr := m.getNotFoundError()
if notFoundErr != nil {
glog.Warningf(
"notfound error at runtask {%s}: error {%s}",
m.getTaskIdentity(),
notFoundErr,
)
err, _ = notFoundErr.(*templatefuncs.NotFoundError)
return
}
return nil
}
// ExecuteIt will execute the runtask based on
// its meta & task specifications
func (m *executor) ExecuteIt() (err error) {
if m.getK8sClient() == nil {
return errors.Errorf("failed to execute task: nil k8s client: verify if namespace is set: %s", m)
}
// kind as command is a special case of task execution
if m.MetaExec.isCommand() {
return m.postExecuteIt()
}
if m.MetaExec.isRolloutstatus() {
err = m.rolloutStatus()
} else if m.MetaExec.isPutExtnV1B1Deploy() {
err = m.putExtnV1B1Deploy()
} else if m.MetaExec.isPutAppsV1B1Deploy() {
err = m.putAppsV1B1Deploy()
} else if m.MetaExec.isPatchExtnV1B1Deploy() {
err = m.patchExtnV1B1Deploy()
} else if m.MetaExec.isPatchAppsV1B1Deploy() {
err = m.patchAppsV1B1Deploy()
} else if m.MetaExec.isPatchOEV1alpha1SPC() {
err = m.patchOEV1alpha1SPC()
} else if m.MetaExec.isPatchOEV1alpha1CSPC() {
err = m.patchOEV1alpha1CSPC()
} else if m.MetaExec.isPutCoreV1Service() {
err = m.putCoreV1Service()
} else if m.MetaExec.isPatchCoreV1Service() {
err = m.patchCoreV1Service()
} else if m.MetaExec.isDeleteExtnV1B1Deploy() {
err = m.deleteExtnV1B1Deployment()
} else if m.MetaExec.isDeleteExtnV1B1ReplicaSet() {
err = m.deleteExtnV1B1ReplicaSet()
} else if m.MetaExec.isGetExtnV1B1Deploy() {
err = m.getExtnV1B1Deployment()
} else if m.MetaExec.isGetExtnV1B1ReplicaSet() {
err = m.getExtnV1B1ReplicaSet()
} else if m.MetaExec.isGetCoreV1Pod() {
err = m.getCoreV1Pod()
} else if m.MetaExec.isDeleteAppsV1B1Deploy() {
err = m.deleteAppsV1B1Deployment()
} else if m.MetaExec.isDeleteCoreV1Service() {
err = m.deleteCoreV1Service()
} else if m.MetaExec.isGetOEV1alpha1Disk() {
err = m.getOEV1alpha1Disk()
} else if m.MetaExec.isGetOEV1alpha1SPC() {
err = m.getOEV1alpha1SPC()
} else if m.MetaExec.isGetOEV1alpha1CSPC() {
err = m.getOEV1alpha1CSPC()
} else if m.MetaExec.isGetOEV1alpha1SP() {
err = m.getOEV1alpha1SP()
} else if m.MetaExec.isGetOEV1alpha1CSP() {
err = m.getOEV1alpha1CSP()
} else if m.MetaExec.isGetOEV1alpha1UR() {
err = m.getOEV1alpha1UR()
} else if m.MetaExec.isGetCoreV1PVC() {
err = m.getCoreV1PVC()
} else if m.MetaExec.isGetCoreV1Service() {
err = m.getCoreV1Service()
} else if m.MetaExec.isGetOEV1alpha1CSV() {
err = m.getOEV1alpha1CSV()
} else if m.MetaExec.isPutOEV1alpha1CSP() {
err = m.putCStorPool()
} else if m.MetaExec.isPutOEV1alpha1SP() {
err = m.putStoragePool()
} else if m.MetaExec.isPutOEV1alpha1CSV() {
err = m.putCStorVolume()
} else if m.MetaExec.isPutOEV1alpha1CVR() {
err = m.putCStorVolumeReplica()
} else if m.MetaExec.isPutOEV1alpha1UR() {
err = m.putUpgradeResult()
} else if m.MetaExec.isDeleteOEV1alpha1SP() {
err = m.deleteOEV1alpha1SP()
} else if m.MetaExec.isDeleteOEV1alpha1CSP() {
err = m.deleteOEV1alpha1CSP()
} else if m.MetaExec.isDeleteOEV1alpha1CSV() {
err = m.deleteOEV1alpha1CSV()
} else if m.MetaExec.isDeleteOEV1alpha1CVR() {
err = m.deleteOEV1alpha1CVR()
} else if m.MetaExec.isPatchOEV1alpha1CSV() {
err = m.patchOEV1alpha1CSV()
} else if m.MetaExec.isPatchOEV1alpha1CVR() {
err = m.patchOEV1alpha1CVR()
} else if m.MetaExec.isPatchOEV1alpha1UR() {
err = m.patchUpgradeResult()
} else if m.MetaExec.isPatchOEV1alpha1SP() {
err = m.patchStoragePool()
} else if m.MetaExec.isPatchOEV1alpha1CSP() {
err = m.patchCstorPool()
} else if m.MetaExec.isList() {
err = m.listK8sResources()
} else if m.MetaExec.isGetStorageV1SC() {
err = m.getStorageV1SC()
} else if m.MetaExec.isGetCoreV1PV() {
err = m.getCoreV1PV()
} else if m.MetaExec.isDeleteBatchV1Job() {
err = m.deleteBatchV1Job()
} else if m.MetaExec.isGetBatchV1Job() {
err = m.getBatchV1Job()
} else if m.MetaExec.isPutBatchV1Job() {
err = m.putBatchV1Job()
} else if m.MetaExec.isPutAppsV1STS() {
err = m.putAppsV1STS()
} else if m.MetaExec.isDeleteAppsV1STS() {
err = m.deleteAppsV1STS()
} else if m.MetaExec.isExecCoreV1Pod() {
err = m.execCoreV1Pod()
} else {
err = ErrorUnSupportedTask
}
if err != nil {
return errors.Wrapf(err, "failed to execute task: %s", m)
}
// run the post operations after a runtask is executed
return m.postExecuteIt()
}
// asRollbackInstance will provide the rollback
// instance associated to this task's instance
func (m *executor) asRollbackInstance(objectName string) (*executor, error) {
mte, willRollback, err := m.MetaExec.asRollbackInstance(objectName)
if err != nil {
return nil, errors.Wrapf(err, "failed to build rollback executor for object {%s}: %v", objectName, m.MetaExec)
}
if !willRollback {
// no need of rollback
return nil, nil
}
// Only the meta info is required for a rollback. In
// other words, the task yaml template & values are not needed
return &executor{
MetaExec: mte,
}, nil
}
// asBatchV1Job generates a K8s Job object
// out of the embedded yaml
func (m *executor) asBatchV1Job() (*api_batch_v1.Job, error) {
j, err := m_k8s.NewJobYml("BatchV1Job", m.Runtask.Spec.Task, m.Values)
if err != nil {
return nil, errors.Wrap(err, "failed to build job")
}
return j.AsBatchV1Job()
}
// asAppsV1STS generates a kubernetes StatefulSet api
// instance from the yaml string specification
func (m *executor) asAppsV1STS() (*api_apps_v1.StatefulSet, error) {
s, err := m_k8s.NewSTSYml("AppsV1StatefulSet", m.Runtask.Spec.Task, m.Values)
if err != nil {
return nil, errors.Wrap(err, "failed to build statefulset")
}
return s.AsAppsV1STS()
}
// asAppsV1B1Deploy generates a K8s Deployment object
// out of the embedded yaml
func (m *executor) asAppsV1B1Deploy() (*api_apps_v1beta1.Deployment, error) {
d, err := m_k8s.NewDeploymentYml("AppsV1B1Deploy", m.Runtask.Spec.Task, m.Values)
if err != nil {
return nil, errors.Wrap(err, "failed to build deployment")
}
return d.AsAppsV1B1Deployment()
}
// asExtnV1B1Deploy generates a K8s Deployment object
// out of the embedded yaml
func (m *executor) asExtnV1B1Deploy() (*api_extn_v1beta1.Deployment, error) {
d, err := m_k8s.NewDeploymentYml("ExtnV1B11Deploy", m.Runtask.Spec.Task, m.Values)
if err != nil {
return nil, errors.Wrap(err, "failed to build deployment")
}
return d.AsExtnV1B1Deployment()
}
// asCStorPool generates a CStorPool object
// out of the embedded yaml
func (m *executor) asCStorPool() (*v1alpha1.CStorPool, error) {
d, err := m_k8s.NewCStorPoolYml("CStorPool", m.Runtask.Spec.Task, m.Values)
if err != nil {
return nil, errors.Wrap(err, "failed to build cstorpool")
}
return d.AsCStorPoolYml()
}
// asStoragePool generates a StoragePool object
// out of the embedded yaml
func (m *executor) asStoragePool() (*v1alpha1.StoragePool, error) {
d, err := m_k8s.NewStoragePoolYml("StoragePool", m.Runtask.Spec.Task, m.Values)
if err != nil {
return nil, errors.Wrap(err, "failed to build storagepool")
}
return d.AsStoragePoolYml()
}
// asCStorVolume generates a CStorVolume object
// out of the embedded yaml
func (m *executor) asCStorVolume() (*v1alpha1.CStorVolume, error) {
d, err := m_k8s.NewCStorVolumeYml("CstorVolume", m.Runtask.Spec.Task, m.Values)
if err != nil {
return nil, errors.Wrap(err, "failed to build cstorvolume")
}
return d.AsCStorVolumeYml()
}
// asCstorVolumeReplica generates a CStorVolumeReplica object
// out of the embedded yaml
func (m *executor) asCstorVolumeReplica() (*v1alpha1.CStorVolumeReplica, error) {
d, err := m_k8s.NewCStorVolumeReplicaYml("CstorVolumeReplica", m.Runtask.Spec.Task, m.Values)
if err != nil {
return nil, errors.Wrap(err, "failed to build cstorvolumereplica")
}
return d.AsCStorVolumeReplicaYml()
}
// asCoreV1Svc generates a K8s Service object
// out of the embedded yaml
func (m *executor) asCoreV1Svc() (*api_core_v1.Service, error) {
s, err := m_k8s.NewServiceYml("CoreV1Svc", m.Runtask.Spec.Task, m.Values)
if err != nil {
return nil, errors.Wrap(err, "failed to build service")
}
return s.AsCoreV1Service()
}
// putBatchV1Job will put a Job object
func (m *executor) putBatchV1Job() error {
j, err := m.asBatchV1Job()
if err != nil {
return errors.Wrap(err, "failed to create job")
}
job, err := m.getK8sClient().CreateBatchV1JobAsRaw(j)
if err != nil {
return errors.Wrap(err, "failed to create job")
}
util.SetNestedField(m.Values, job, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// putAppsV1STS will create a new StatefulSet
// object in the cluster and store the response
// in a json format
func (m *executor) putAppsV1STS() error {
j, err := m.asAppsV1STS()
if err != nil {
return errors.Wrap(err, "failed to create statefulset")
}
sts, err := m.getK8sClient().CreateAppsV1STSAsRaw(j)
if err != nil {
return errors.Wrap(err, "failed to create statefulset")
}
util.SetNestedField(m.Values, sts, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// putAppsV1B1Deploy will put (i.e. apply to a kubernetes cluster) a Deployment
// object. The Deployment specs are configured in the RunTask.
func (m *executor) putAppsV1B1Deploy() error {
d, err := m.asAppsV1B1Deploy()
if err != nil {
return errors.Wrap(err, "failed to create deployment")
}
deploy, err := m.getK8sClient().CreateAppsV1B1DeploymentAsRaw(d)
if err != nil {
return errors.Wrap(err, "failed to create deployment")
}
util.SetNestedField(m.Values, deploy, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// putExtnV1B1Deploy will put (i.e. apply to kubernetes cluster) a Deployment
// whose specifications are defined in the RunTask
func (m *executor) putExtnV1B1Deploy() error {
d, err := m.asExtnV1B1Deploy()
if err != nil {
return errors.Wrap(err, "failed to create deployment")
}
deploy, err := m.getK8sClient().CreateExtnV1B1DeploymentAsRaw(d)
if err != nil {
return errors.Wrap(err, "failed to create deployment")
}
util.SetNestedField(m.Values, deploy, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// patchOEV1alpha1SPC will patch an SPC object in a kubernetes cluster.
// The patch specifications are configured in the RunTask.
func (m *executor) patchOEV1alpha1SPC() error {
patch, err := asTaskPatch("patchSPC", m.Runtask.Spec.Task, m.Values)
if err != nil {
return errors.Wrap(err, "failed to patch storagepoolclaim")
}
pe, err := newTaskPatchExecutor(patch)
if err != nil {
return errors.Wrap(err, "failed to patch storagepoolclaim")
}
raw, err := pe.toJson()
if err != nil {
return errors.Wrap(err, "failed to patch storagepoolclaim")
}
// patch storagepoolclaim
spc, err := m.getK8sClient().PatchOEV1alpha1SPCAsRaw(m.getTaskObjectName(), pe.patchType(), raw)
if err != nil {
return errors.Wrap(err, "failed to patch storagepoolclaim")
}
util.SetNestedField(m.Values, spc, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// patchOEV1alpha1CSPC will patch a CSPC object in a kubernetes cluster.
// The patch specifications are configured in the RunTask.
func (m *executor) patchOEV1alpha1CSPC() (err error) {
patch, err := asTaskPatch("patchSPC", m.Runtask.Spec.Task, m.Values)
if err != nil {
return
}
pe, err := newTaskPatchExecutor(patch)
if err != nil {
return
}
raw, err := pe.toJson()
if err != nil {
return
}
// patch the CSPC
cspc, err := m.getK8sClient().PatchOEV1alpha1CSPCAsRaw(m.getTaskObjectName(), pe.patchType(), raw)
if err != nil {
return
}
util.SetNestedField(m.Values, cspc, string(v1alpha1.CurrentJSONResultTLP))
return
}
// patchOEV1alpha1CSV will patch a CStorVolume as defined in the task
func (m *executor) patchOEV1alpha1CSV() error {
patch, err := asTaskPatch("patchCSV", m.Runtask.Spec.Task, m.Values)
if err != nil {
return errors.Wrap(err, "failed to patch cstorvolume")
}
pe, err := newTaskPatchExecutor(patch)
if err != nil {
return errors.Wrap(err, "failed to patch cstorvolume")
}
raw, err := pe.toJson()
if err != nil {
return errors.Wrap(err, "failed to patch cstorvolume")
}
// patch the cstorvolume
csv, err := m.getK8sClient().PatchOEV1alpha1CSV(
m.getTaskObjectName(),
m.getTaskRunNamespace(),
pe.patchType(),
raw,
)
if err != nil {
return errors.Wrap(err, "failed to patch cstorvolume")
}
util.SetNestedField(m.Values, csv, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// patchOEV1alpha1CVR will patch a CStorVolumeReplica as defined in the task
func (m *executor) patchOEV1alpha1CVR() error {
patch, err := asTaskPatch("patchCVR", m.Runtask.Spec.Task, m.Values)
if err != nil {
return errors.Wrap(err, "failed to patch cstorvolumereplica")
}
pe, err := newTaskPatchExecutor(patch)
if err != nil {
return errors.Wrap(err, "failed to patch cstorvolumereplica")
}
raw, err := pe.toJson()
if err != nil {
return errors.Wrap(err, "failed to patch cstorvolumereplica")
}
// patch cstorvolumereplica
cvr, err := m.getK8sClient().PatchOEV1alpha1CVR(
m.getTaskObjectName(),
m.getTaskRunNamespace(),
pe.patchType(),
raw,
)
if err != nil {
return errors.Wrap(err, "failed to patch cstorvolumereplica")
}
util.SetNestedField(m.Values, cvr, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// patchUpgradeResult will patch an UpgradeResult
// as defined in the task
func (m *executor) patchUpgradeResult() error {
// build a runtask patch instance
patch, err := patch.
BuilderForRuntask("UpgradeResult", m.Runtask.Spec.Task, m.Values).
AddCheckf(patch.IsValidType(), "IsValidType").
Build()
if err != nil {
return errors.Wrap(err, "failed to patch upgraderesult")
}
// patch Upgrade Result
p, err := upgraderesult.
KubeClient(upgraderesult.WithNamespace(m.getTaskRunNamespace())).
Patch(m.getTaskObjectName(), patch.Type, patch.Object)
if err != nil {
return errors.Wrap(err, "failed to patch upgraderesult")
}
util.SetNestedField(m.Values, p, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// patchStoragePool will patch a StoragePool as defined in the task
func (m *executor) patchStoragePool() (err error) {
// build a runtask patch instance
patch, err := patch.
BuilderForRuntask("StoragePool", m.Runtask.Spec.Task, m.Values).
AddCheckf(patch.IsValidType(), "patch type is not valid").
Build()
if err != nil {
return
}
p, err := storagepool.
NewKubeClient().
Patch(m.getTaskObjectName(), patch.Type, patch.Object)
if err != nil {
return
}
util.SetNestedField(m.Values, p, string(v1alpha1.CurrentJSONResultTLP))
return
}
// patchCstorPool will patch a CStorPool as defined in the task
func (m *executor) patchCstorPool() (err error) {
patch, err := patch.
BuilderForRuntask("CstorPool", m.Runtask.Spec.Task, m.Values).
AddCheckf(patch.IsValidType(), "patch type is not valid").
Build()
if err != nil {
return
}
p, err := cstorpool.
NewKubeClient().
Patch(m.getTaskObjectName(), patch.Type, patch.Object)
if err != nil {
return
}
util.SetNestedField(m.Values, p, string(v1alpha1.CurrentJSONResultTLP))
return
}
// patchAppsV1B1Deploy will patch a Deployment object in a kubernetes cluster.
// The patch specifications are configured in the RunTask.
func (m *executor) patchAppsV1B1Deploy() (err error) {
err = fmt.Errorf("patchAppsV1B1Deploy is not implemented")
return
}
// patchExtnV1B1Deploy will patch a Deployment
// object where patch specifications are
// configured in the RunTask
func (m *executor) patchExtnV1B1Deploy() error {
patch, err := asTaskPatch("ExtnV1B1DeployPatch", m.Runtask.Spec.Task, m.Values)
if err != nil {
return errors.Wrap(err, "failed to patch deployment")
}
pe, err := newTaskPatchExecutor(patch)
if err != nil {
return errors.Wrap(err, "failed to patch deployment")
}
raw, err := pe.toJson()
if err != nil {
return errors.Wrap(err, "failed to patch deployment")
}
// patch the deployment
deploy, err := m.getK8sClient().PatchExtnV1B1DeploymentAsRaw(
m.getTaskObjectName(),
pe.patchType(),
raw,
)
if err != nil {
return errors.Wrap(err, "failed to patch deployment")
}
util.SetNestedField(m.Values, deploy, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// patchCoreV1Service will patch a Service
// where patch specifications are configured
// in the RunTask
func (m *executor) patchCoreV1Service() error {
patch, err := asTaskPatch("CoreV1ServicePatch", m.Runtask.Spec.Task, m.Values)
if err != nil {
return errors.Wrap(err, "failed to patch service")
}
pe, err := newTaskPatchExecutor(patch)
if err != nil {
return errors.Wrap(err, "failed to patch service")
}
raw, err := pe.toJson()
if err != nil {
return errors.Wrap(err, "failed to patch service")
}
// patch service
service, err := m.getK8sClient().PatchCoreV1ServiceAsRaw(
m.getTaskObjectName(),
pe.patchType(),
raw,
)
if err != nil {
return errors.Wrap(err, "failed to patch service")
}
util.SetNestedField(m.Values, service, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// deleteAppsV1B1Deployment will delete one or
// more Deployments as specified in the RunTask
func (m *executor) deleteAppsV1B1Deployment() error {
objectNames := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
for _, name := range objectNames {
err := m.getK8sClient().DeleteAppsV1B1Deployment(strings.TrimSpace(name))
if err != nil {
return errors.Wrapf(err, "failed to delete deployment {%s}", name)
}
}
return nil
}
// deleteOEV1alpha1CVR will delete one or more
// CStorVolumeReplica as specified in
// the RunTask
func (m *executor) deleteOEV1alpha1CVR() error {
objectNames := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
for _, name := range objectNames {
err := m.getK8sClient().DeleteOEV1alpha1CVR(name)
if err != nil {
return errors.Wrapf(err, "failed to delete cstorvolumereplica {%s}", name)
}
}
return nil
}
// deleteExtnV1B1Deployment will delete one or
// more Deployments as specified in the RunTask
func (m *executor) deleteExtnV1B1Deployment() error {
objectNames := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
for _, name := range objectNames {
err := m.getK8sClient().DeleteExtnV1B1Deployment(strings.TrimSpace(name))
if err != nil {
return errors.Wrapf(err, "failed to delete deployment {%s}", name)
}
}
return nil
}
// getExtnV1B1ReplicaSet will get the ReplicaSet
// as specified in the RunTask
func (m *executor) getExtnV1B1ReplicaSet() error {
rs, err := replicaset.
KubeClient(replicaset.WithNamespace(m.getTaskRunNamespace())).
GetRaw(m.getTaskObjectName())
if err != nil {
return errors.Wrap(err, "failed to get replicaset")
}
util.SetNestedField(m.Values, rs, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// deleteExtnV1B1ReplicaSet will delete one or
// more ReplicaSets as specified in the RunTask
func (m *executor) deleteExtnV1B1ReplicaSet() error {
objectNames := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
client := replicaset.KubeClient(
replicaset.WithNamespace(m.getTaskRunNamespace()))
for _, name := range objectNames {
err := client.Delete(strings.TrimSpace(name))
if err != nil {
return errors.Wrapf(err, "failed to delete replicaset {%s}", name)
}
}
return nil
}
// listExtnV1B1ReplicaSet lists the replica sets
// based on the provided list options
func (m *executor) listExtnV1B1ReplicaSet(opt metav1.ListOptions) ([]byte, error) {
return replicaset.
KubeClient(replicaset.WithNamespace(m.getTaskRunNamespace())).
ListRaw(opt)
}
// putCoreV1Service will create a Service whose
// specs are configured in the RunTask
func (m *executor) putCoreV1Service() error {
s, err := m.asCoreV1Svc()
if err != nil {
return errors.Wrapf(err, "failed to create service")
}
svc, err := m.getK8sClient().CreateCoreV1ServiceAsRaw(s)
if err != nil {
return errors.Wrapf(err, "failed to create service")
}
util.SetNestedField(m.Values, svc, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// deleteCoreV1Service will delete one or more
// services as specified in the RunTask
func (m *executor) deleteCoreV1Service() error {
objectNames := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
for _, name := range objectNames {
err := m.getK8sClient().DeleteCoreV1Service(strings.TrimSpace(name))
if err != nil {
return errors.Wrapf(err, "failed to delete service {%s}", name)
}
}
return nil
}
// getOEV1alpha1Disk will get the Disk
// as specified in the RunTask
func (m *executor) getOEV1alpha1Disk() error {
disk, err := m.getK8sClient().GetOEV1alpha1DiskAsRaw(m.getTaskObjectName())
if err != nil {
return errors.Wrapf(err, "failed to get disk")
}
util.SetNestedField(m.Values, disk, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// getOEV1alpha1SPC will get the StoragePoolClaim
// as specified in the RunTask
func (m *executor) getOEV1alpha1SPC() error {
spc, err := m.getK8sClient().GetOEV1alpha1SPCAsRaw(m.getTaskObjectName())
if err != nil {
return errors.Wrapf(err, "failed to get storagepoolclaim")
}
util.SetNestedField(m.Values, spc, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// getOEV1alpha1CSPC will get the CStorPoolCluster as specified in the RunTask
func (m *executor) getOEV1alpha1CSPC() (err error) {
cspc, err := m.getK8sClient().GetOEV1alpha1CSPCAsRaw(m.getTaskObjectName())
if err != nil {
return
}
util.SetNestedField(m.Values, cspc, string(v1alpha1.CurrentJSONResultTLP))
return
}
// getOEV1alpha1SP will get the StoragePool as specified in the RunTask
func (m *executor) getOEV1alpha1SP() (err error) {
sp, err := m.getK8sClient().GetOEV1alpha1SPAsRaw(m.getTaskObjectName())
if err != nil {
return errors.Wrapf(err, "failed to get storagepool")
}
util.SetNestedField(m.Values, sp, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// getOEV1alpha1CSP will get the CStorPool as specified in the RunTask
func (m *executor) getOEV1alpha1CSP() error {
csp, err := m.getK8sClient().GetOEV1alpha1CSPAsRaw(m.getTaskObjectName())
if err != nil {
return errors.Wrapf(err, "failed to get cstorstoragepool")
}
util.SetNestedField(m.Values, csp, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// getOEV1alpha1UR will get the UpgradeResult
// as specified in the RunTask
func (m *executor) getOEV1alpha1UR() error {
uresult, err := upgraderesult.
KubeClient(upgraderesult.WithNamespace(m.getTaskRunNamespace())).
Get(m.getTaskObjectName(), metav1.GetOptions{})
if err != nil {
return errors.Wrap(err, "failed to get upgraderesult")
}
ur, err := json.Marshal(uresult)
if err != nil {
return errors.Wrap(err, "failed to get upgraderesult")
}
util.SetNestedField(m.Values, ur, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// getOEV1alpha1CSV will get the CstorVolume as specified in the RunTask
func (m *executor) getOEV1alpha1CSV() error {
csv, err := cstorvolume.NewKubeclient(
cstorvolume.WithNamespace(m.getTaskRunNamespace())).
GetRaw(m.getTaskObjectName(), metav1.GetOptions{})
if err != nil {
return err
}
util.SetNestedField(m.Values, csv, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// getCoreV1Service will get the Service as specified in the RunTask
func (m *executor) getCoreV1Service() error {
svc, err := service.KubeClient(
service.WithNamespace(m.getTaskRunNamespace())).
GetRaw(m.getTaskObjectName(), metav1.GetOptions{})
if err != nil {
return err
}
util.SetNestedField(m.Values, svc, string(v1alpha1.CurrentJSONResultTLP))
return nil
}
// getExtnV1B1Deployment will get the Deployment as specified in the RunTask
func (m *executor) getExtnV1B1Deployment() (err error) {
dclient := deploy_extnv1beta1.KubeClient(
deploy_extnv1beta1.WithNamespace(m.getTaskRunNamespace()),
deploy_extnv1beta1.WithClientset(m.getK8sClient().GetKCS()))
d, err := dclient.GetRaw(m.getTaskObjectName())
if err != nil {
return
}
util.SetNestedField(m.Values, d, string(v1alpha1.CurrentJSONResultTLP))
return
}
// extnV1B1DeploymentRollOutStatus generates rollout status for a given deployment from the deployment object
func (m *executor) extnV1B1DeploymentRollOutStatus() (err error) {
dclient := deploy_extnv1beta1.KubeClient(
deploy_extnv1beta1.WithNamespace(m.getTaskRunNamespace()),
deploy_extnv1beta1.WithClientset(m.getK8sClient().GetKCS()))
res, err := dclient.RolloutStatusf(m.getTaskObjectName())
if err != nil {
return
}
util.SetNestedField(m.Values, res, string(v1alpha1.CurrentJSONResultTLP))
return
}
// appsV1DeploymentRollOutStatus generates rollout status for a given deployment from the deployment object
func (m *executor) appsV1DeploymentRollOutStatus() (err error) {
dclient := deploy_appsv1.KubeClient(
deploy_appsv1.WithNamespace(m.getTaskRunNamespace()),
deploy_appsv1.WithClientset(m.getK8sClient().GetKCS()))
res, err := dclient.RolloutStatusf(m.getTaskObjectName())
if err != nil {
return
}
util.SetNestedField(m.Values, res, string(v1alpha1.CurrentJSONResultTLP))
return
}
// getAppsV1Deployment will get the Deployment as specified in the RunTask
func (m *executor) getAppsV1Deployment() (err error) {
dclient := deploy_appsv1.KubeClient(
deploy_appsv1.WithNamespace(m.getTaskRunNamespace()),
deploy_appsv1.WithClientset(m.getK8sClient().GetKCS()))
d, err := dclient.GetRaw(m.getTaskObjectName())
if err != nil {
// skip storing the result if the fetch failed, matching the
// error handling of the sibling getters
return
}
util.SetNestedField(m.Values, d, string(v1alpha1.CurrentJSONResultTLP))
return
}
// getCoreV1PVC will get the PVC as specified in the RunTask
func (m *executor) getCoreV1PVC() (err error) {
pvc, err := m.getK8sClient().GetCoreV1PVCAsRaw(m.getTaskObjectName())
if err != nil {
return
}
util.SetNestedField(m.Values, pvc, string(v1alpha1.CurrentJSONResultTLP))
return
}
// getCoreV1PV will get the PersistentVolume as specified in the RunTask
func (m *executor) getCoreV1PV() (err error) {
pv, err := m.getK8sClient().GetCoreV1PersistentVolumeAsRaw(m.getTaskObjectName())
if err != nil {
return
}
util.SetNestedField(m.Values, pv, string(v1alpha1.CurrentJSONResultTLP))
return
}
// getBatchV1Job will get the Job as specified in the RunTask
func (m *executor) getBatchV1Job() (err error) {
job, err := m.getK8sClient().GetBatchV1JobAsRaw(m.getTaskObjectName())
if err != nil {
return
}
util.SetNestedField(m.Values, job, string(v1alpha1.CurrentJSONResultTLP))
return
}
// getCoreV1Pod will get the Pod as specified in the RunTask
func (m *executor) getCoreV1Pod() (err error) {
podClient := pod.NewKubeClient(pod.WithNamespace(m.getTaskRunNamespace()))
pod, err := podClient.GetRaw(m.getTaskObjectName(), metav1.GetOptions{})
if err != nil {
return
}
util.SetNestedField(m.Values, pod, string(v1alpha1.CurrentJSONResultTLP))
return
}
// deleteBatchV1Job will delete one or more Jobs specified in the RunTask
func (m *executor) deleteBatchV1Job() (err error) {
jobs := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
for _, name := range jobs {
err = m.getK8sClient().DeleteBatchV1Job(strings.TrimSpace(name))
if err != nil {
return
}
}
return
}
// deleteAppsV1STS will delete one or more StatefulSets
func (m *executor) deleteAppsV1STS() (err error) {
stss := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
for _, name := range stss {
err = m.getK8sClient().DeleteAppsV1STS(strings.TrimSpace(name))
if err != nil {
return
}
}
return
}
// getStorageV1SC will get the StorageClass as specified in the RunTask
func (m *executor) getStorageV1SC() (err error) {
sc, err := m.getK8sClient().GetStorageV1SCAsRaw(m.getTaskObjectName())
if err != nil {
return
}
util.SetNestedField(m.Values, sc, string(v1alpha1.CurrentJSONResultTLP))
return
}
// putStoragePool will put a StoragePool as defined in the task
func (m *executor) putStoragePool() (err error) {
c, err := m.asStoragePool()
if err != nil {
return
}
storagePool, err := m.getK8sClient().CreateOEV1alpha1SPAsRaw(c)
if err != nil {
return
}
util.SetNestedField(m.Values, storagePool, string(v1alpha1.CurrentJSONResultTLP))
return
}
// putCStorPool will put a CStorPool as defined in the task
func (m *executor) putCStorPool() (err error) {
c, err := m.asCStorPool()
if err != nil {
return
}
cstorPool, err := m.getK8sClient().CreateOEV1alpha1CSPAsRaw(c)
if err != nil {
return
}
util.SetNestedField(m.Values, cstorPool, string(v1alpha1.CurrentJSONResultTLP))
return
}
// putCStorVolume will put a CStorVolume as defined in the task
func (m *executor) putCStorVolume() (err error) {
c, err := m.asCStorVolume()
if err != nil {
return
}
cstorVolume, err := m.getK8sClient().CreateOEV1alpha1CVAsRaw(c)
if err != nil {
return
}
util.SetNestedField(m.Values, cstorVolume, string(v1alpha1.CurrentJSONResultTLP))
return
}
// putCStorVolumeReplica will put a CStorVolumeReplica as defined in the task
func (m *executor) putCStorVolumeReplica() (err error) {
d, err := m.asCstorVolumeReplica()
if err != nil {
return
}
cstorVolumeReplica, err := m.getK8sClient().CreateOEV1alpha1CVRAsRaw(d)
if err != nil {
return
}
util.SetNestedField(m.Values, cstorVolumeReplica, string(v1alpha1.CurrentJSONResultTLP))
return
}
// putUpgradeResult will put an upgrade result as defined in the task
func (m *executor) putUpgradeResult() (err error) {
uresult, err := upgraderesult.
BuilderForRuntask("UpgradeResult", m.Runtask.Spec.Task, m.Values).
Build()
if err != nil {
return
}
uraw, err := upgraderesult.
KubeClient(upgraderesult.WithNamespace(m.getTaskRunNamespace())).
CreateRaw(uresult)
if err != nil {
return
}
util.SetNestedField(m.Values, uraw, string(v1alpha1.CurrentJSONResultTLP))
return
}
// deleteOEV1alpha1SP will delete one or more StoragePool as specified in
// the RunTask
func (m *executor) deleteOEV1alpha1SP() (err error) {
objectNames := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
for _, name := range objectNames {
err = m.getK8sClient().DeleteOEV1alpha1SP(name)
if err != nil {
return
}
}
return
}
// deleteOEV1alpha1CSP will delete one or more CStorPool as specified in
// the RunTask
func (m *executor) deleteOEV1alpha1CSP() (err error) {
objectNames := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
for _, name := range objectNames {
err = m.getK8sClient().DeleteOEV1alpha1CSP(name)
if err != nil {
return
}
}
return
}
// deleteOEV1alpha1CSV will delete one or more CStorVolume as specified in
// the RunTask
func (m *executor) deleteOEV1alpha1CSV() (err error) {
objectNames := strings.Split(strings.TrimSpace(m.getTaskObjectName()), ",")
for _, name := range objectNames {
err = m.getK8sClient().DeleteOEV1alpha1CSV(name)
if err != nil {
return
}
}
return
}
// execCoreV1Pod runs the given command remotely in the given container of
// the given pod and posts stdout and stderr in JsonResult. You can get it using -
// {{- jsonpath .JsonResult "{.Stdout}" | trim | saveAs "XXX" .TaskResult | noop -}}
func (m *executor) execCoreV1Pod() (err error) {
podexecopts, err := podexec.WithTemplate("execCoreV1Pod", m.Runtask.Spec.Task, m.Values).
AsAPIPodExec()
if err != nil {
return
}
result, err := m.getK8sClient().ExecCoreV1Pod(m.getTaskObjectName(), podexecopts)
if err != nil {
return
}
util.SetNestedField(m.Values, result, string(v1alpha1.CurrentJSONResultTLP))
return
}
// rolloutStatus generates the rollout status of a given resource from its object details
func (m *executor) rolloutStatus() (err error) {
if m.MetaExec.isRolloutstatusExtnV1B1Deploy() {
err = m.extnV1B1DeploymentRollOutStatus()
} else if m.MetaExec.isRolloutstatusAppsV1Deploy() {
err = m.appsV1DeploymentRollOutStatus()
} else {
err = fmt.Errorf("failed to get rollout status : meta task not supported: task details '%+v'", m.MetaExec.getTaskIdentity())
}
return
}
// listK8sResources will list resources as specified in the RunTask
func (m *executor) listK8sResources() (err error) {
opts, err := m.MetaExec.getListOptions()
if err != nil {
return
}
var op []byte
kc := m.getK8sClient()
if m.MetaExec.isListCoreV1Pod() {
op, err = kc.ListCoreV1PodAsRaw(opts)
} else if m.MetaExec.isListCoreV1Service() {
op, err = kc.ListCoreV1ServiceAsRaw(opts)
} else if m.MetaExec.isListExtnV1B1Deploy() {
op, err = kc.ListExtnV1B1DeploymentAsRaw(opts)
} else if m.MetaExec.isListExtnV1B1ReplicaSet() {
op, err = m.listExtnV1B1ReplicaSet(opts)
} else if m.MetaExec.isListAppsV1B1Deploy() {
op, err = kc.ListAppsV1B1DeploymentAsRaw(opts)
} else if m.MetaExec.isListCoreV1PVC() {
op, err = kc.ListCoreV1PVCAsRaw(opts)
} else if m.MetaExec.isListCoreV1PV() {
op, err = kc.ListCoreV1PVAsRaw(opts)
} else if m.MetaExec.isListOEV1alpha1Disk() {
op, err = kc.ListOEV1alpha1DiskRaw(opts)
} else if m.MetaExec.isListOEV1alpha1SP() {
op, err = kc.ListOEV1alpha1SPRaw(opts)
} else if m.MetaExec.isListOEV1alpha1CSP() {
op, err = kc.ListOEV1alpha1CSPRaw(opts)
} else if m.MetaExec.isListOEV1alpha1CVR() {
op, err = kc.ListOEV1alpha1CVRRaw(opts)
} else if m.MetaExec.isListOEV1alpha1CV() {
op, err = kc.ListOEV1alpha1CVRaw(opts)
} else if m.MetaExec.isListOEV1alpha1UR() {
op, err = m.listOEV1alpha1URRaw(opts)
} else {
err = fmt.Errorf("failed to list k8s resources: meta task not supported: task details '%+v'", m.MetaExec.getTaskIdentity())
}
if err != nil {
return
}
// set the json doc result
util.SetNestedField(m.Values, op, string(v1alpha1.CurrentJSONResultTLP))
return
}
// listOEV1alpha1URRaw fetches a list of UpgradeResults as per the
// provided options
func (m *executor) listOEV1alpha1URRaw(opts metav1.ListOptions) (result []byte, err error) {
uc := upgraderesult.KubeClient(upgraderesult.WithNamespace(m.getTaskRunNamespace()))
urList, err := uc.List(opts)
if err != nil {
return
}
result, err = json.Marshal(urList)
return
}
| 1 | 15,396 | Can this be `BuilderForYAMLObject` | openebs-maya | go |
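The review asks whether the runtask-bound builder could be generalized. Below is a minimal sketch of what a BuilderForYAMLObject entry point might look like, reusing the templating step the executor above already relies on (template.AsTemplatedBytes). The function name comes from the review comment; the signature, the Builder fields, and the import path are assumptions, not the project's confirmed API.

package patch

import (
	"github.com/openebs/maya/pkg/template"
)

// Builder holds the templated patch object plus any errors collected
// while building it; these field names are assumed for this sketch.
type Builder struct {
	object []byte
	errs   []error
}

// BuilderForYAMLObject templates an arbitrary YAML object against the
// provided values. Unlike BuilderForRuntask, it is not tied to a
// runtask's Spec.Task, which appears to be the reviewer's point.
func BuilderForYAMLObject(context, yml string, values map[string]interface{}) *Builder {
	b := &Builder{}
	raw, err := template.AsTemplatedBytes(context, yml, values)
	if err != nil {
		b.errs = append(b.errs, err)
		return b
	}
	b.object = raw
	return b
}

A call site could then read patch.BuilderForYAMLObject("UpgradeResult", m.Runtask.Spec.Task, m.Values) wherever the YAML being patched is not strictly a runtask concern.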
@@ -42,12 +42,14 @@ cpp_test = pytest.mark.skipif(not hasattr(core, "test_coverage"),
#-------------------------------------------------------------------------------
def test_multiprocessing_threadpool():
+ import atexit
# Verify that threads work properly after forking (#1758)
import multiprocessing as mp
from datatable.internal import get_thread_ids
parent_threads = get_thread_ids()
n = 4
pool = mp.Pool(processes=n)
+ atexit.register(pool.close)
child_threads = pool.starmap(get_thread_ids, [()] * n, chunksize=1)
assert len(child_threads) == n
for chthreads in child_threads: | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Copyright 2018 H2O.ai
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#-------------------------------------------------------------------------------
import datatable as dt
import itertools
import pytest
import subprocess
import sys
import time
from datatable.lib import core
#-------------------------------------------------------------------------------
# Check if we need to run C++ tests
#-------------------------------------------------------------------------------
cpp_test = pytest.mark.skipif(not hasattr(core, "test_coverage"),
reason="C++ tests were not compiled")
#-------------------------------------------------------------------------------
# Test parallel infrastructure
#-------------------------------------------------------------------------------
def test_multiprocessing_threadpool():
# Verify that threads work properly after forking (#1758)
import multiprocessing as mp
from datatable.internal import get_thread_ids
parent_threads = get_thread_ids()
n = 4
pool = mp.Pool(processes=n)
child_threads = pool.starmap(get_thread_ids, [()] * n, chunksize=1)
assert len(child_threads) == n
for chthreads in child_threads:
assert len(parent_threads) == len(chthreads)
assert chthreads != parent_threads
@cpp_test
@pytest.mark.parametrize('test_name, nargs',
[
["shmutex", 3],
["barrier", 1],
["parallel_for_static", 1],
["parallel_for_dynamic", 1],
["parallel_for_ordered", 1],
["progress_static", 2],
["progress_nested", 2],
["progress_dynamic", 2],
["progress_ordered", 2]
]
)
def test_parameters(test_name, nargs):
for i in range(nargs - 1):
args = list(range(i))
message = ("In %s the number of arguments required is %d, "
"got: %d" % ("test_" + test_name + r"\(\)", nargs, i))
with pytest.raises(ValueError, match = message):
testfn = "test_%s" % test_name
getattr(core, testfn)(*args)
@cpp_test
def test_internal_shared_mutex():
core.test_shmutex(500, dt.options.nthreads * 2, 1)
@cpp_test
def test_internal_shared_bmutex():
core.test_shmutex(1000, dt.options.nthreads * 2, 0)
@cpp_test
def test_internal_atomic():
core.test_atomic()
@cpp_test
def test_internal_barrier():
core.test_barrier(100)
@cpp_test
def test_internal_parallel_for_static():
core.test_parallel_for_static(1000)
@cpp_test
def test_internal_parallel_for_dynamic():
core.test_parallel_for_dynamic(1000)
@cpp_test
def test_internal_parallel_for_ordered1():
core.test_parallel_for_ordered(1723)
@cpp_test
def test_internal_parallel_for_ordered2():
n0 = dt.options.nthreads
try:
dt.options.nthreads = 2
core.test_parallel_for_ordered(1723)
finally:
dt.options.nthreads = n0
# Make sure C++ tests run cleanly when not interrupted
@cpp_test
@pytest.mark.parametrize('parallel_type, nthreads',
itertools.product(
["static", "nested", "dynamic", "ordered"],
[1, dt.options.nthreads//2, dt.options.nthreads]
)
)
def test_progress(parallel_type, nthreads):
niterations = 1000
ntimes = 2
cmd = "core.test_progress_%s(%s, %s);" % (
parallel_type, niterations, nthreads)
for _ in range(ntimes):
exec(cmd)
# Send interrupt signal and make sure process raises KeyboardInterrupt
@cpp_test
@pytest.mark.parametrize('parallel_type, nthreads',
itertools.product(
[None, "static", "nested", "dynamic", "ordered"],
[1, dt.options.nthreads//2, dt.options.nthreads]
)
)
def test_progress_interrupt(parallel_type, nthreads):
import signal
niterations = 10000
sleep_time = 0.01
exception = "KeyboardInterrupt\n"
message = "[cancelled]\x1b[m\x1b[K\n"
cmd = "import datatable as dt; from datatable.lib import core;"
cmd += "dt.options.progress.enabled = True;"
cmd += "dt.options.progress.min_duration = 0;"
cmd += "print('%s start', flush = True); " % parallel_type;
if parallel_type:
if parallel_type == "ordered":
niterations //= 10
cmd += "core.test_progress_%s(%s, %s)" % (
parallel_type, niterations, nthreads)
else:
cmd += "import time; "
cmd += "dt.options.nthreads = %s; " % nthreads
cmd += "time.sleep(%s);" % sleep_time * 10
proc = subprocess.Popen([sys.executable, "-c", cmd],
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
line = proc.stdout.readline()
assert line.decode() == str(parallel_type) + " start\n"
time.sleep(sleep_time)
proc.send_signal(signal.Signals.SIGINT)
(stdout, stderr) = proc.communicate()
stdout_str = stdout.decode()
stderr_str = stderr.decode()
is_exception = stderr_str.endswith(exception)
is_cancelled = stdout_str.endswith(message) if parallel_type else is_exception
if not is_exception or not is_cancelled:
print("\nstdout: \n%s" % stdout_str)
print("\nstderr: \n%s" % stderr_str)
assert is_cancelled
assert is_exception
| 1 | 11,950 | better use `with mp.Pool(...) as pool:` here | h2oai-datatable | py |
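The suggestion maps directly onto the patched test. A sketch of the test body with the pool managed as a context manager follows; multiprocessing.Pool supports the with protocol, and its exit handler terminates the pool, so neither the atexit hook from the patch nor an explicit close() is needed:

import multiprocessing as mp

def test_multiprocessing_threadpool():
    # Verify that threads work properly after forking (#1758)
    from datatable.internal import get_thread_ids
    parent_threads = get_thread_ids()
    n = 4
    # __exit__ terminates the pool, replacing atexit.register(pool.close)
    with mp.Pool(processes=n) as pool:
        child_threads = pool.starmap(get_thread_ids, [()] * n, chunksize=1)
    assert len(child_threads) == n
    for chthreads in child_threads:
        assert len(parent_threads) == len(chthreads)
        assert chthreads != parent_threads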
@@ -48,8 +48,12 @@ public class Pids extends AbstractApiBean {
String baseUrl = systemConfig.getDataCiteRestApiUrlString();
String username = System.getProperty("doi.username");
String password = System.getProperty("doi.password");
- JsonObjectBuilder result = PidUtil.queryDoi(persistentId, baseUrl, username, password);
- return ok(result);
+ try {
+ JsonObjectBuilder result = PidUtil.queryDoi(persistentId, baseUrl, username, password);
+ return ok(result);
+ } catch (Exception ex) {
+ return error(Response.Status.BAD_REQUEST, ex.getLocalizedMessage());
+ }
}
@GET | 1 | package edu.harvard.iq.dataverse.api;
import edu.harvard.iq.dataverse.Dataset;
import static edu.harvard.iq.dataverse.api.AbstractApiBean.error;
import edu.harvard.iq.dataverse.authorization.users.User;
import edu.harvard.iq.dataverse.engine.command.impl.DeletePidCommand;
import edu.harvard.iq.dataverse.engine.command.impl.ReservePidCommand;
import edu.harvard.iq.dataverse.pidproviders.PidUtil;
import edu.harvard.iq.dataverse.util.BundleUtil;
import java.util.Arrays;
import javax.ejb.Stateless;
import javax.json.Json;
import javax.json.JsonArray;
import javax.json.JsonArrayBuilder;
import javax.json.JsonObjectBuilder;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
* PIDs are Persistent IDentifiers such as DOIs or Handles.
*
* Currently PIDs can be minted at the dataset and file level but there is
* demand for PIDs at the dataverse level too. That's why this dedicated "pids"
* endpoint exists, to be somewhat future proof.
*/
@Stateless
@Path("pids")
public class Pids extends AbstractApiBean {
@GET
@Produces(MediaType.APPLICATION_JSON)
public Response getPid(@QueryParam("persistentId") String persistentId) {
try {
User user = findUserOrDie();
if (!user.isSuperuser()) {
return error(Response.Status.FORBIDDEN, BundleUtil.getStringFromBundle("admin.api.auth.mustBeSuperUser"));
}
} catch (WrappedResponse ex) {
return error(Response.Status.FORBIDDEN, BundleUtil.getStringFromBundle("api.errors.invalidApiToken"));
}
String baseUrl = systemConfig.getDataCiteRestApiUrlString();
String username = System.getProperty("doi.username");
String password = System.getProperty("doi.password");
JsonObjectBuilder result = PidUtil.queryDoi(persistentId, baseUrl, username, password);
return ok(result);
}
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("unreserved")
public Response getUnreserved(@QueryParam("persistentId") String persistentId) {
try {
User user = findUserOrDie();
if (!user.isSuperuser()) {
return error(Response.Status.FORBIDDEN, BundleUtil.getStringFromBundle("admin.api.auth.mustBeSuperUser"));
}
} catch (WrappedResponse ex) {
return error(Response.Status.FORBIDDEN, BundleUtil.getStringFromBundle("api.errors.invalidApiToken"));
}
JsonArrayBuilder unreserved = Json.createArrayBuilder();
for (Dataset dataset : datasetSvc.findAll()) {
if (dataset.isReleased()) {
continue;
}
if (dataset.getGlobalIdCreateTime() == null) {
unreserved.add(Json.createObjectBuilder()
.add("id", dataset.getId())
.add("pid", dataset.getGlobalId().asString())
);
}
}
JsonArray finalUnreserved = unreserved.build();
int size = finalUnreserved.size();
return ok(Json.createObjectBuilder()
.add("numUnreserved", size)
.add("count", finalUnreserved)
);
}
@POST
@Produces(MediaType.APPLICATION_JSON)
@Path("{id}/reserve")
public Response reservePid(@PathParam("id") String idSupplied) {
try {
Dataset dataset = findDatasetOrDie(idSupplied);
execCommand(new ReservePidCommand(createDataverseRequest(findUserOrDie()), dataset));
return ok(BundleUtil.getStringFromBundle("pids.api.reservePid.success", Arrays.asList(dataset.getGlobalId().asString())));
} catch (WrappedResponse ex) {
return ex.getResponse();
}
}
@DELETE
@Produces(MediaType.APPLICATION_JSON)
@Path("{id}/delete")
public Response deletePid(@PathParam("id") String idSupplied) {
try {
Dataset dataset = findDatasetOrDie(idSupplied);
// Restrict to never-published datasets (that should have draft/nonpublic pids).
// The underlying code will invalidate pids that have been made public by a
// pid-specific method, but it's not clear that invalidating such a pid via an
// api that doesn't destroy the dataset is a good idea.
if(dataset.isReleased()) {
return badRequest("Not allowed for Datasets that have been published.");
}
execCommand(new DeletePidCommand(createDataverseRequest(findUserOrDie()), dataset));
return ok(BundleUtil.getStringFromBundle("pids.api.deletePid.success", Arrays.asList(dataset.getGlobalId().asString())));
} catch (WrappedResponse ex) {
return ex.getResponse();
}
}
}
| 1 | 43,350 | Since this is a config problem, BAD_REQUEST is probably not the right response. | IQSS-dataverse | java |
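Acting on the comment means mapping a DataCite configuration failure to a server-side status. A sketch of the catch block from the patch with 500 in place of 400 follows; the exact status chosen here is an assumption, since the comment only rules out BAD_REQUEST:

try {
    JsonObjectBuilder result = PidUtil.queryDoi(persistentId, baseUrl, username, password);
    return ok(result);
} catch (Exception ex) {
    // A missing or bad DataCite configuration is the server's problem,
    // not the caller's, so 500 (or 503) describes the failure better
    // than 400.
    return error(Response.Status.INTERNAL_SERVER_ERROR, ex.getLocalizedMessage());
}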
@@ -479,6 +479,11 @@ func (o *Options) ProcessConfigFile(configFile string) error {
errors = append(errors, err)
continue
}
+ if dur < 30*time.Second {
+ err := &configErr{tk, fmt.Sprintf("invalid lame_duck_duration of %v, minimum is 30 seconds", dur)}
+ errors = append(errors, err)
+ continue
+ }
o.LameDuckDuration = dur
case "trusted":
switch v.(type) { | 1 | // Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/nats-io/gnatsd/conf"
"github.com/nats-io/nkeys"
)
// ClusterOpts are options for clusters.
type ClusterOpts struct {
Host string `json:"addr,omitempty"`
Port int `json:"cluster_port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
Permissions *RoutePermissions `json:"-"`
TLSTimeout float64 `json:"-"`
TLSConfig *tls.Config `json:"-"`
ListenStr string `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ConnectRetries int `json:"-"`
}
// GatewayOpts are options for gateways.
type GatewayOpts struct {
Name string `json:"name"`
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
Advertise string `json:"advertise,omitempty"`
ConnectRetries int `json:"connect_retries,omitempty"`
DefaultPermissions *GatewayPermissions `json:"default_permissions,omitempty"`
Gateways []*RemoteGatewayOpts `json:"gateways,omitempty"`
RejectUnknown bool `json:"reject_unknown,omitempty"`
// Not exported, for tests.
resolver netResolver
sendQSubsBufSize int
}
// RemoteGatewayOpts are options for connecting to a remote gateway
type RemoteGatewayOpts struct {
Name string `json:"name"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
URLs []*url.URL `json:"urls,omitempty"`
Permissions *GatewayPermissions `json:"permissions,omitempty"`
}
// Options block for gnatsd server.
type Options struct {
ConfigFile string `json:"-"`
Host string `json:"addr"`
Port int `json:"port"`
ClientAdvertise string `json:"-"`
Trace bool `json:"-"`
Debug bool `json:"-"`
NoLog bool `json:"-"`
NoSigs bool `json:"-"`
Logtime bool `json:"-"`
MaxConn int `json:"max_connections"`
MaxSubs int `json:"max_subscriptions,omitempty"`
Nkeys []*NkeyUser `json:"-"`
Users []*User `json:"-"`
Accounts []*Account `json:"-"`
AllowNewAccounts bool `json:"-"`
Username string `json:"-"`
Password string `json:"-"`
Authorization string `json:"-"`
PingInterval time.Duration `json:"ping_interval"`
MaxPingsOut int `json:"ping_max"`
HTTPHost string `json:"http_host"`
HTTPPort int `json:"http_port"`
HTTPSPort int `json:"https_port"`
AuthTimeout float64 `json:"auth_timeout"`
MaxControlLine int `json:"max_control_line"`
MaxPayload int `json:"max_payload"`
MaxPending int64 `json:"max_pending"`
Cluster ClusterOpts `json:"cluster,omitempty"`
Gateway GatewayOpts `json:"gateway,omitempty"`
ProfPort int `json:"-"`
PidFile string `json:"-"`
PortsFileDir string `json:"-"`
LogFile string `json:"-"`
Syslog bool `json:"-"`
RemoteSyslog string `json:"-"`
Routes []*url.URL `json:"-"`
RoutesStr string `json:"-"`
TLSTimeout float64 `json:"tls_timeout"`
TLS bool `json:"-"`
TLSVerify bool `json:"-"`
TLSCert string `json:"-"`
TLSKey string `json:"-"`
TLSCaCert string `json:"-"`
TLSConfig *tls.Config `json:"-"`
WriteDeadline time.Duration `json:"-"`
RQSubsSweep time.Duration `json:"-"` // Deprecated
MaxClosedClients int `json:"-"`
LameDuckDuration time.Duration `json:"-"`
TrustedNkeys []string `json:"-"`
CustomClientAuthentication Authentication `json:"-"`
CustomRouterAuthentication Authentication `json:"-"`
// CheckConfig configuration file syntax test was successful and exit.
CheckConfig bool `json:"-"`
// private fields, used for testing
gatewaysSolicitDelay time.Duration
}
type netResolver interface {
LookupHost(ctx context.Context, host string) ([]string, error)
}
// Clone performs a deep copy of the Options struct, returning a new clone
// with all values copied.
func (o *Options) Clone() *Options {
if o == nil {
return nil
}
clone := &Options{}
*clone = *o
if o.Users != nil {
clone.Users = make([]*User, len(o.Users))
for i, user := range o.Users {
clone.Users[i] = user.clone()
}
}
if o.Nkeys != nil {
clone.Nkeys = make([]*NkeyUser, len(o.Nkeys))
for i, nkey := range o.Nkeys {
clone.Nkeys[i] = nkey.clone()
}
}
if o.Routes != nil {
clone.Routes = deepCopyURLs(o.Routes)
}
if o.TLSConfig != nil {
clone.TLSConfig = o.TLSConfig.Clone()
}
if o.Cluster.TLSConfig != nil {
clone.Cluster.TLSConfig = o.Cluster.TLSConfig.Clone()
}
if o.Gateway.TLSConfig != nil {
clone.Gateway.TLSConfig = o.Gateway.TLSConfig.Clone()
}
if len(o.Gateway.Gateways) > 0 {
clone.Gateway.Gateways = make([]*RemoteGatewayOpts, len(o.Gateway.Gateways))
for i, g := range o.Gateway.Gateways {
clone.Gateway.Gateways[i] = g.clone()
}
}
return clone
}
func deepCopyURLs(urls []*url.URL) []*url.URL {
if urls == nil {
return nil
}
curls := make([]*url.URL, len(urls))
for i, u := range urls {
cu := &url.URL{}
*cu = *u
curls[i] = cu
}
return curls
}
// Configuration file authorization section.
type authorization struct {
// Singles
user string
pass string
token string
// Multiple Nkeys/Users
nkeys []*NkeyUser
users []*User
timeout float64
defaultPermissions *Permissions
}
// TLSConfigOpts holds the parsed tls config information,
// used with flag parsing
type TLSConfigOpts struct {
CertFile string
KeyFile string
CaFile string
Verify bool
Timeout float64
Ciphers []uint16
CurvePreferences []tls.CurveID
}
var tlsUsage = `
TLS configuration is specified in the tls section of a configuration file:
e.g.
tls {
cert_file: "./certs/server-cert.pem"
key_file: "./certs/server-key.pem"
ca_file: "./certs/ca.pem"
verify: true
cipher_suites: [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
]
curve_preferences: [
"CurveP256",
"CurveP384",
"CurveP521"
]
}
Available cipher suites include:
`
// ProcessConfigFile processes a configuration file.
// FIXME(dlc): A bit hacky
func ProcessConfigFile(configFile string) (*Options, error) {
opts := &Options{}
if err := opts.ProcessConfigFile(configFile); err != nil {
// If only warnings then continue and return the options.
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) == 0 {
return opts, nil
}
return nil, err
}
return opts, nil
}
// token is an item parsed from the configuration.
type token interface {
Value() interface{}
Line() int
IsUsedVariable() bool
SourceFile() string
Position() int
}
// unwrapValue gets the token and value from an item
// so that the line number can be reported in case of
// an incorrect configuration.
func unwrapValue(v interface{}) (token, interface{}) {
switch tk := v.(type) {
case token:
return tk, tk.Value()
default:
return nil, v
}
}
// ProcessConfigFile updates the Options structure with options
// present in the given configuration file.
// This version is convenient if one wants to set some default
// options and then override them with what is in the config file.
// For instance, this version allows you to do something such as:
//
// opts := &Options{Debug: true}
// opts.ProcessConfigFile(myConfigFile)
//
// If the config file contains "debug: false", after this call,
// opts.Debug would really be false. It would be impossible to
// achieve that with the non receiver ProcessConfigFile() version,
// since one would not know after the call if "debug" was not present
// or was present but set to false.
func (o *Options) ProcessConfigFile(configFile string) error {
o.ConfigFile = configFile
if configFile == "" {
return nil
}
m, err := conf.ParseFileWithChecks(configFile)
if err != nil {
return err
}
// Collect all errors and warnings and report them all together.
errors := make([]error, 0)
warnings := make([]error, 0)
for k, v := range m {
tk, v := unwrapValue(v)
switch strings.ToLower(k) {
case "listen":
hp, err := parseListen(v)
if err != nil {
errors = append(errors, &configErr{tk, err.Error()})
continue
}
o.Host = hp.host
o.Port = hp.port
case "client_advertise":
o.ClientAdvertise = v.(string)
case "port":
o.Port = int(v.(int64))
case "host", "net":
o.Host = v.(string)
case "debug":
o.Debug = v.(bool)
case "trace":
o.Trace = v.(bool)
case "logtime":
o.Logtime = v.(bool)
case "accounts":
err := parseAccounts(tk, o, &errors, &warnings)
if err != nil {
errors = append(errors, err)
continue
}
case "authorization":
auth, err := parseAuthorization(tk, o, &errors, &warnings)
if err != nil {
errors = append(errors, err)
continue
}
o.Username = auth.user
o.Password = auth.pass
o.Authorization = auth.token
if (auth.user != "" || auth.pass != "") && auth.token != "" {
err := &configErr{tk, fmt.Sprintf("Cannot have a user/pass and token")}
errors = append(errors, err)
continue
}
o.AuthTimeout = auth.timeout
// Check for multiple users defined
if auth.users != nil {
if auth.user != "" {
err := &configErr{tk, fmt.Sprintf("Can not have a single user/pass and a users array")}
errors = append(errors, err)
continue
}
if auth.token != "" {
err := &configErr{tk, fmt.Sprintf("Can not have a token and a users array")}
errors = append(errors, err)
continue
}
// Users may have been added from Accounts parsing, so do an append here
o.Users = append(o.Users, auth.users...)
}
// Check for nkeys
if auth.nkeys != nil {
// NKeys may have been added from Accounts parsing, so do an append here
o.Nkeys = append(o.Nkeys, auth.nkeys...)
}
case "http":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
o.HTTPHost = hp.host
o.HTTPPort = hp.port
case "https":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
o.HTTPHost = hp.host
o.HTTPSPort = hp.port
case "http_port", "monitor_port":
o.HTTPPort = int(v.(int64))
case "https_port":
o.HTTPSPort = int(v.(int64))
case "cluster":
err := parseCluster(tk, o, &errors, &warnings)
if err != nil {
errors = append(errors, err)
continue
}
case "gateway":
if err := parseGateway(tk, o, &errors, &warnings); err != nil {
errors = append(errors, err)
continue
}
case "logfile", "log_file":
o.LogFile = v.(string)
case "syslog":
o.Syslog = v.(bool)
case "remote_syslog":
o.RemoteSyslog = v.(string)
case "pidfile", "pid_file":
o.PidFile = v.(string)
case "ports_file_dir":
o.PortsFileDir = v.(string)
case "prof_port":
o.ProfPort = int(v.(int64))
case "max_control_line":
o.MaxControlLine = int(v.(int64))
case "max_payload":
o.MaxPayload = int(v.(int64))
case "max_pending":
o.MaxPending = v.(int64)
case "max_connections", "max_conn":
o.MaxConn = int(v.(int64))
case "max_subscriptions", "max_subs":
o.MaxSubs = int(v.(int64))
case "ping_interval":
o.PingInterval = time.Duration(int(v.(int64))) * time.Second
case "ping_max":
o.MaxPingsOut = int(v.(int64))
case "tls":
tc, err := parseTLS(tk)
if err != nil {
errors = append(errors, err)
continue
}
if o.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
o.TLSTimeout = tc.Timeout
case "write_deadline":
wd, ok := v.(string)
if ok {
dur, err := time.ParseDuration(wd)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing write_deadline: %v", err)}
errors = append(errors, err)
continue
}
o.WriteDeadline = dur
} else {
// Backward compatible with old type, assume this is the
// number of seconds.
o.WriteDeadline = time.Duration(v.(int64)) * time.Second
err := &configWarningErr{
field: k,
configErr: configErr{
token: tk,
reason: "write_deadline should be converted to a duration",
},
}
warnings = append(warnings, err)
}
case "lame_duck_duration":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_duration: %v", err)}
errors = append(errors, err)
continue
}
o.LameDuckDuration = dur
case "trusted":
switch v.(type) {
case string:
o.TrustedNkeys = []string{v.(string)}
case []string:
o.TrustedNkeys = v.([]string)
case []interface{}:
keys := make([]string, 0, len(v.([]interface{})))
for _, mv := range v.([]interface{}) {
tk, mv = unwrapValue(mv)
if key, ok := mv.(string); ok {
keys = append(keys, key)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type in array %T", mv)}
errors = append(errors, err)
continue
}
}
o.TrustedNkeys = keys
default:
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type %T", v)}
errors = append(errors, err)
}
// Do a quick sanity check on keys
for _, key := range o.TrustedNkeys {
if !nkeys.IsValidPublicOperatorKey(key) {
err := &configErr{tk, fmt.Sprintf("trust key %q required to be a valid public operator nkey", key)}
errors = append(errors, err)
}
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
errors = append(errors, err)
}
}
}
if len(errors) > 0 || len(warnings) > 0 {
return &processConfigErr{
errors: errors,
warnings: warnings,
}
}
return nil
}
// hostPort is simple struct to hold parsed listen/addr strings.
type hostPort struct {
host string
port int
}
// parseListen will parse the listen option, which replaces host/net and port
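// Both forms below are accepted by this function (the values are
// illustrative, not taken from a shipped config file):
//
//   listen: 4222              # port only
//   listen: "0.0.0.0:4222"    # host and port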
func parseListen(v interface{}) (*hostPort, error) {
hp := &hostPort{}
switch vv := v.(type) {
// Only a port
case int64:
hp.port = int(vv)
case string:
host, port, err := net.SplitHostPort(vv)
if err != nil {
return nil, fmt.Errorf("Could not parse address string %q", vv)
}
hp.port, err = strconv.Atoi(port)
if err != nil {
return nil, fmt.Errorf("Could not parse port %q", port)
}
hp.host = host
}
return hp, nil
}
// parseCluster will parse the cluster config.
func parseCluster(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
tk, v := unwrapValue(v)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define cluster, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.Cluster.Host = hp.host
opts.Cluster.Port = hp.port
case "port":
opts.Cluster.Port = int(mv.(int64))
case "host", "net":
opts.Cluster.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
				err := &configErr{tk, "Cluster authorization does not allow multiple users"}
*errors = append(*errors, err)
continue
}
opts.Cluster.Username = auth.user
opts.Cluster.Password = auth.pass
opts.Cluster.AuthTimeout = auth.timeout
if auth.defaultPermissions != nil {
err := &configWarningErr{
field: mk,
configErr: configErr{
token: tk,
reason: `setting "permissions" within cluster authorization block is deprecated`,
},
}
*warnings = append(*warnings, err)
// Do not set permissions if they were specified in top-level cluster block.
if opts.Cluster.Permissions == nil {
setClusterPermissions(&opts.Cluster, auth.defaultPermissions)
}
}
case "routes":
ra := mv.([]interface{})
routes, errs := parseURLs(ra, "route")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
opts.Routes = routes
case "tls":
config, timeout, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.Cluster.TLSConfig = config
opts.Cluster.TLSTimeout = timeout
case "cluster_advertise", "advertise":
opts.Cluster.Advertise = mv.(string)
case "no_advertise":
opts.Cluster.NoAdvertise = mv.(bool)
case "connect_retries":
opts.Cluster.ConnectRetries = int(mv.(int64))
case "permissions":
perms, err := parseUserPermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
			// This will possibly override permissions that were defined in the auth block.
setClusterPermissions(&opts.Cluster, perms)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
func parseURLs(a []interface{}, typ string) ([]*url.URL, []error) {
var (
errors []error
urls = make([]*url.URL, 0, len(a))
)
for _, u := range a {
tk, u := unwrapValue(u)
sURL := u.(string)
url, err := parseURL(sURL, typ)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
urls = append(urls, url)
}
return urls, errors
}
func parseURL(u string, typ string) (*url.URL, error) {
urlStr := strings.TrimSpace(u)
url, err := url.Parse(urlStr)
if err != nil {
return nil, fmt.Errorf("error parsing %s url [%q]", typ, urlStr)
}
return url, nil
}
func parseGateway(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
tk, v := unwrapValue(v)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected gateway to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv)
switch strings.ToLower(mk) {
case "name":
o.Gateway.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Gateway.Host = hp.host
o.Gateway.Port = hp.port
case "port":
o.Gateway.Port = int(mv.(int64))
case "host", "net":
o.Gateway.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
*errors = append(*errors, &configErr{tk, "Gateway authorization does not allow multiple users"})
continue
}
o.Gateway.Username = auth.user
o.Gateway.Password = auth.pass
o.Gateway.AuthTimeout = auth.timeout
case "tls":
config, timeout, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
o.Gateway.TLSConfig = config
o.Gateway.TLSTimeout = timeout
case "advertise":
o.Gateway.Advertise = mv.(string)
case "connect_retries":
o.Gateway.ConnectRetries = int(mv.(int64))
case "default_permissions":
perms, err := parseGatewayPermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
o.Gateway.DefaultPermissions = perms
case "gateways":
gateways, err := parseGateways(mv, errors, warnings)
if err != nil {
return err
}
o.Gateway.Gateways = gateways
case "reject_unknown":
o.Gateway.RejectUnknown = mv.(bool)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
// Parses TLS and returns a TLSConfig and TLSTimeout.
// Used by cluster and gateway parsing.
func getTLSConfig(tk token) (*tls.Config, float64, error) {
tc, err := parseTLS(tk)
if err != nil {
return nil, 0, err
}
config, err := GenTLSConfig(tc)
if err != nil {
err := &configErr{tk, err.Error()}
return nil, 0, err
}
// For clusters/gateways, we will force strict verification. We also act
// as both client and server, so will mirror the rootCA to the
// clientCA pool.
config.ClientAuth = tls.RequireAndVerifyClientCert
config.RootCAs = config.ClientCAs
return config, tc.Timeout, nil
}
func parseGateways(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteGatewayOpts, error) {
tk, v := unwrapValue(v)
// Make sure we have an array
ga, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected gateways field to be an array, got %T", v)}
}
gateways := []*RemoteGatewayOpts{}
for _, g := range ga {
tk, g = unwrapValue(g)
		// Check it's a map/struct
gm, ok := g.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected gateway entry to be a map/struct, got %v", g)})
continue
}
gateway := &RemoteGatewayOpts{}
for k, v := range gm {
tk, v = unwrapValue(v)
switch strings.ToLower(k) {
case "name":
gateway.Name = v.(string)
case "tls":
tls, timeout, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
gateway.TLSConfig = tls
gateway.TLSTimeout = timeout
case "url":
url, err := parseURL(v.(string), "gateway")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
gateway.URLs = append(gateway.URLs, url)
case "urls":
urls, errs := parseURLs(v.([]interface{}), "gateway")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
gateway.URLs = urls
case "permissions":
perms, err := parseGatewayPermissions(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
gateway.Permissions = perms
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
gateways = append(gateways, gateway)
}
return gateways, nil
}
// Sets cluster's permissions based on given pub/sub permissions,
// doing the appropriate translation.
func setClusterPermissions(opts *ClusterOpts, perms *Permissions) {
// Import is whether or not we will send a SUB for interest to the other side.
// Export is whether or not we will accept a SUB from the remote for a given subject.
	// Both only affect interest registration.
	// The parsing sets Import into Publish and Export into Subscribe, so convert
	// accordingly.
opts.Permissions = &RoutePermissions{
Import: perms.Publish,
Export: perms.Subscribe,
}
}
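// Hedged illustration of the translation above (the subjects are examples):
//
//	perms := &Permissions{
//		Publish:   &SubjectPermission{Allow: []string{"foo.>"}},
//		Subscribe: &SubjectPermission{Allow: []string{"bar.>"}},
//	}
//	setClusterPermissions(&opts.Cluster, perms)
//	// opts.Cluster.Permissions.Import is now the Publish permission and
//	// opts.Cluster.Permissions.Export the Subscribe permission.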
// Temp structures to hold account import and export definitions since they need
// to be processed after being parsed.
type export struct {
acc *Account
sub string
accs []string
}
type importStream struct {
acc *Account
an string
sub string
pre string
}
type importService struct {
acc *Account
an string
sub string
to string
}
// Checks if an account name is reserved.
func isReservedAccount(name string) bool {
return name == globalAccountName
}
// parseAccounts will parse the different accounts syntax.
func parseAccounts(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var (
importStreams []*importStream
importServices []*importService
exportStreams []*export
exportServices []*export
)
tk, v := unwrapValue(v)
switch vv := v.(type) {
// Simple array of account names.
case []interface{}, []string:
m := make(map[string]struct{}, len(v.([]interface{})))
for _, n := range v.([]interface{}) {
tk, name := unwrapValue(n)
ns := name.(string)
// Check for reserved names.
if isReservedAccount(ns) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", ns)}
*errors = append(*errors, err)
continue
}
if _, ok := m[ns]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate Account Entry: %s", ns)}
*errors = append(*errors, err)
continue
}
opts.Accounts = append(opts.Accounts, &Account{Name: ns})
m[ns] = struct{}{}
}
// More common map entry
case map[string]interface{}:
// Track users across accounts, must be unique across
// accounts and nkeys vs users.
uorn := make(map[string]struct{})
for aname, mv := range vv {
tk, amv := unwrapValue(mv)
// Skip referenced config vars within the account block.
if tk.IsUsedVariable() {
continue
}
// These should be maps.
mv, ok := amv.(map[string]interface{})
if !ok {
err := &configErr{tk, "Expected map entries for accounts"}
*errors = append(*errors, err)
continue
}
if isReservedAccount(aname) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", aname)}
*errors = append(*errors, err)
continue
}
acc := &Account{Name: aname}
opts.Accounts = append(opts.Accounts, acc)
for k, v := range mv {
tk, mv := unwrapValue(v)
switch strings.ToLower(k) {
case "nkey":
nk, ok := mv.(string)
if !ok || !nkeys.IsValidPublicAccountKey(nk) {
err := &configErr{tk, fmt.Sprintf("Not a valid public nkey for an account: %q", mv)}
*errors = append(*errors, err)
continue
}
acc.Nkey = nk
case "imports":
streams, services, err := parseAccountImports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
importStreams = append(importStreams, streams...)
importServices = append(importServices, services...)
case "exports":
streams, services, err := parseAccountExports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
exportStreams = append(exportStreams, streams...)
exportServices = append(exportServices, services...)
case "users":
nkeys, users, err := parseUsers(mv, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
for _, u := range users {
if _, ok := uorn[u.Username]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate user %q detected", u.Username)}
*errors = append(*errors, err)
continue
}
uorn[u.Username] = struct{}{}
u.Account = acc
}
opts.Users = append(opts.Users, users...)
for _, u := range nkeys {
if _, ok := uorn[u.Nkey]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate nkey %q detected", u.Nkey)}
*errors = append(*errors, err)
continue
}
uorn[u.Nkey] = struct{}{}
u.Account = acc
}
opts.Nkeys = append(opts.Nkeys, nkeys...)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
}
}
	// Bail out now if there are previous errors.
if len(*errors) > 0 {
return nil
}
	// Parse Imports and Exports here after all accounts are defined.
	// Do exports first since they need to be defined before imports can
	// succeed, because we do permissions checks.
	// Create a map for account lookups.
am := make(map[string]*Account, len(opts.Accounts))
for _, a := range opts.Accounts {
am[a.Name] = a
}
// Do stream exports
for _, stream := range exportStreams {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range stream.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := stream.acc.AddStreamExport(stream.sub, accounts); err != nil {
msg := fmt.Sprintf("Error adding stream export %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range exportServices {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range service.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := service.acc.AddServiceExport(service.sub, accounts); err != nil {
msg := fmt.Sprintf("Error adding service export %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, stream := range importStreams {
ta := am[stream.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream import", stream.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := stream.acc.AddStreamImport(ta, stream.sub, stream.pre); err != nil {
msg := fmt.Sprintf("Error adding stream import %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range importServices {
ta := am[service.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service import", service.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.to == "" {
service.to = service.sub
}
if err := service.acc.AddServiceImport(ta, service.to, service.sub); err != nil {
msg := fmt.Sprintf("Error adding service import %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
return nil
}
// Parse the account exports
func parseAccountExports(v interface{}, acc *Account, errors, warnings *[]error) ([]*export, []*export, error) {
// This should be an array of objects/maps.
tk, v := unwrapValue(v)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Exports should be an array, got %T", v)}
}
var services []*export
var streams []*export
for _, v := range ims {
// Should have stream or service
stream, service, err := parseExportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Parse the account imports
func parseAccountImports(v interface{}, acc *Account, errors, warnings *[]error) ([]*importStream, []*importService, error) {
// This should be an array of objects/maps.
tk, v := unwrapValue(v)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Imports should be an array, got %T", v)}
}
var services []*importService
var streams []*importStream
for _, v := range ims {
// Should have stream or service
stream, service, err := parseImportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Helper to parse an embedded account description for imported services or streams.
func parseAccount(v map[string]interface{}, errors, warnings *[]error) (string, string, error) {
var accountName, subject string
for mk, mv := range v {
tk, mv := unwrapValue(mv)
switch strings.ToLower(mk) {
case "account":
accountName = mv.(string)
case "subject":
subject = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return accountName, subject, nil
}
// Parse an export stream or service.
// e.g.
// {stream: "public.>"} # No accounts means public.
// {stream: "synadia.private.>", accounts: [cncf, natsio]}
// {service: "pub.request"} # No accounts means public.
// {service: "pub.special.request", accounts: [nats.io]}
func parseExportStreamOrService(v interface{}, errors, warnings *[]error) (*export, *export, error) {
var (
curStream *export
curService *export
accounts []string
)
tk, v := unwrapValue(v)
vv, ok := v.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Export Items should be a map with type entry, got %T", v)}
}
for mk, mv := range vv {
tk, mv := unwrapValue(mv)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, fmt.Sprintf("Detected stream %q but already saw a service", mv)}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected stream name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curStream = &export{sub: mvs}
if accounts != nil {
curStream.accs = accounts
}
case "service":
if curStream != nil {
err := &configErr{tk, fmt.Sprintf("Detected service %q but already saw a stream", mv)}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected service name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curService = &export{sub: mvs}
if accounts != nil {
curService.accs = accounts
}
case "accounts":
for _, iv := range mv.([]interface{}) {
_, mv := unwrapValue(iv)
accounts = append(accounts, mv.(string))
}
if curStream != nil {
curStream.accs = accounts
} else if curService != nil {
curService.accs = accounts
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// Parse an import stream or service.
// e.g.
// {stream: {account: "synadia", subject:"public.synadia"}, prefix: "imports.synadia"}
// {stream: {account: "synadia", subject:"synadia.private.*"}}
// {service: {account: "synadia", subject: "pub.special.request"}, subject: "synadia.request"}
func parseImportStreamOrService(v interface{}, errors, warnings *[]error) (*importStream, *importService, error) {
var (
curStream *importStream
curService *importService
pre, to string
)
tk, mv := unwrapValue(v)
vv, ok := mv.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Import Items should be a map with type entry, got %T", mv)}
}
for mk, mv := range vv {
tk, mv := unwrapValue(mv)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
				err := &configErr{tk, "Detected stream but already saw a service"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Stream entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
				err := &configErr{tk, "Expected an account name and a subject"}
*errors = append(*errors, err)
continue
}
curStream = &importStream{an: accountName, sub: subject}
if pre != "" {
curStream.pre = pre
}
case "service":
if curStream != nil {
				err := &configErr{tk, "Detected service but already saw a stream"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Service entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
				err := &configErr{tk, "Expected an account name and a subject"}
*errors = append(*errors, err)
continue
}
curService = &importService{an: accountName, sub: subject}
if to != "" {
curService.to = to
}
case "prefix":
pre = mv.(string)
if curStream != nil {
curStream.pre = pre
}
case "to":
to = mv.(string)
if curService != nil {
curService.to = to
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// Helper function to parse Authorization configs.
func parseAuthorization(v interface{}, opts *Options, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
auth = &authorization{}
)
_, v = unwrapValue(v)
am = v.(map[string]interface{})
for mk, mv := range am {
tk, mv = unwrapValue(mv)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "token":
auth.token = mv.(string)
case "timeout":
at := float64(1)
switch mv.(type) {
case int64:
at = float64(mv.(int64))
case float64:
at = mv.(float64)
}
auth.timeout = at
case "users":
nkeys, users, err := parseUsers(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
auth.nkeys = nkeys
case "default_permission", "default_permissions", "permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.defaultPermissions = permissions
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
// Now check for permission defaults with multiple users, etc.
if auth.users != nil && auth.defaultPermissions != nil {
for _, user := range auth.users {
if user.Permissions == nil {
user.Permissions = auth.defaultPermissions
}
}
}
}
return auth, nil
}
// Helper function to parse multiple users array with optional permissions.
func parseUsers(mv interface{}, opts *Options, errors *[]error, warnings *[]error) ([]*NkeyUser, []*User, error) {
var (
tk token
keys []*NkeyUser
users = []*User{}
)
tk, mv = unwrapValue(mv)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
tk, u = unwrapValue(u)
		// Check it's a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
var (
user = &User{}
nkey = &NkeyUser{}
perms *Permissions
err error
)
for k, v := range um {
			// This also needs to be unwrapped first
tk, v = unwrapValue(v)
switch strings.ToLower(k) {
case "nkey":
nkey.Nkey = v.(string)
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "permission", "permissions", "authorization":
perms, err = parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
// Place perms if we have them.
if perms != nil {
			// nkey takes precedence.
if nkey.Nkey != "" {
nkey.Permissions = perms
} else {
user.Permissions = perms
}
}
// Check to make sure we have at least username and password if defined.
if nkey.Nkey == "" && (user.Username == "" || user.Password == "") {
			return nil, nil, &configErr{tk, "User entry requires a user and a password"}
} else if nkey.Nkey != "" {
			// Make sure the nkey is a proper public nkey for a user.
			if !nkeys.IsValidPublicUserKey(nkey.Nkey) {
				return nil, nil, &configErr{tk, "Not a valid public nkey for a user"}
}
// If we have user or password defined here that is an error.
if user.Username != "" || user.Password != "" {
				return nil, nil, &configErr{tk, "Nkey users do not take usernames or passwords"}
}
keys = append(keys, nkey)
} else {
users = append(users, user)
}
}
return keys, users, nil
}
// Helper function to parse user/account permissions
func parseUserPermissions(mv interface{}, errors, warnings *[]error) (*Permissions, error) {
var (
tk token
p = &Permissions{}
)
tk, mv = unwrapValue(mv)
pm, ok := mv.(map[string]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected permissions to be a map/struct, got %+v", mv)}
}
for k, v := range pm {
tk, v = unwrapValue(v)
switch strings.ToLower(k) {
// For routes:
// Import is Publish
// Export is Subscribe
case "pub", "publish", "import":
perms, err := parseVariablePermissions(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Publish = perms
case "sub", "subscribe", "export":
perms, err := parseVariablePermissions(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Subscribe = perms
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Top level parser for authorization configurations.
func parseVariablePermissions(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
switch vv := v.(type) {
case map[string]interface{}:
// New style with allow and/or deny properties.
return parseSubjectPermission(vv, errors, warnings)
default:
// Old style
return parseOldPermissionStyle(v, errors, warnings)
}
}
// Helper function to parse gateway permissions
func parseGatewayPermissions(v interface{}, errors *[]error, warnings *[]error) (*GatewayPermissions, error) {
tk, v := unwrapValue(v)
pm, ok := v.(map[string]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected permissions to be a map/struct, got %+v", v)}
}
perms := &GatewayPermissions{}
for k, v := range pm {
tk, v := unwrapValue(v)
switch strings.ToLower(k) {
case "import":
sp, err := parseVariablePermissions(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
perms.Import = sp
case "export":
sp, err := parseVariablePermissions(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
perms.Export = sp
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return perms, nil
}
// Helper function to parse subject singletons and/or arrays
func parseSubjects(v interface{}, errors, warnings *[]error) ([]string, error) {
tk, v := unwrapValue(v)
var subjects []string
switch vv := v.(type) {
case string:
subjects = append(subjects, vv)
case []string:
subjects = vv
case []interface{}:
for _, i := range vv {
tk, i := unwrapValue(i)
subject, ok := i.(string)
if !ok {
				return nil, &configErr{tk, "Subject in permissions array cannot be cast to string"}
}
subjects = append(subjects, subject)
}
default:
return nil, &configErr{tk, fmt.Sprintf("Expected subject permissions to be a subject, or array of subjects, got %T", v)}
}
if err := checkSubjectArray(subjects); err != nil {
return nil, &configErr{tk, err.Error()}
}
return subjects, nil
}
// Helper function to parse old style authorization configs.
func parseOldPermissionStyle(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
subjects, err := parseSubjects(v, errors, warnings)
if err != nil {
return nil, err
}
return &SubjectPermission{Allow: subjects}, nil
}
// Helper function to parse new style authorization into a SubjectPermission with Allow and Deny.
func parseSubjectPermission(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
m := v.(map[string]interface{})
if len(m) == 0 {
return nil, nil
}
p := &SubjectPermission{}
for k, v := range m {
tk, _ := unwrapValue(v)
switch strings.ToLower(k) {
case "allow":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Allow = subjects
case "deny":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Deny = subjects
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field name %q parsing subject permissions, only 'allow' or 'deny' are permitted", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Helper function to validate subjects, etc for account permissioning.
func checkSubjectArray(sa []string) error {
for _, s := range sa {
if !IsValidSubject(s) {
return fmt.Errorf("Subject %q is not a valid subject", s)
}
}
return nil
}
// PrintTLSHelpAndDie prints TLS usage and exits.
func PrintTLSHelpAndDie() {
fmt.Printf("%s", tlsUsage)
for k := range cipherMap {
fmt.Printf(" %s\n", k)
}
fmt.Printf("\nAvailable curve preferences include:\n")
for k := range curvePreferenceMap {
fmt.Printf(" %s\n", k)
}
os.Exit(0)
}
func parseCipher(cipherName string) (uint16, error) {
cipher, exists := cipherMap[cipherName]
if !exists {
return 0, fmt.Errorf("Unrecognized cipher %s", cipherName)
}
return cipher, nil
}
func parseCurvePreferences(curveName string) (tls.CurveID, error) {
curve, exists := curvePreferenceMap[curveName]
if !exists {
return 0, fmt.Errorf("Unrecognized curve preference %s", curveName)
}
return curve, nil
}
// Helper function to parse TLS configs.
func parseTLS(v interface{}) (*TLSConfigOpts, error) {
var (
tlsm map[string]interface{}
tc = TLSConfigOpts{}
)
_, v = unwrapValue(v)
tlsm = v.(map[string]interface{})
for mk, mv := range tlsm {
tk, mv := unwrapValue(mv)
switch strings.ToLower(mk) {
case "cert_file":
certFile, ok := mv.(string)
if !ok {
				return nil, &configErr{tk, "error parsing tls config, expected 'cert_file' to be filename"}
}
tc.CertFile = certFile
case "key_file":
keyFile, ok := mv.(string)
if !ok {
				return nil, &configErr{tk, "error parsing tls config, expected 'key_file' to be filename"}
}
tc.KeyFile = keyFile
case "ca_file":
caFile, ok := mv.(string)
if !ok {
				return nil, &configErr{tk, "error parsing tls config, expected 'ca_file' to be filename"}
}
tc.CaFile = caFile
case "verify":
verify, ok := mv.(bool)
if !ok {
				return nil, &configErr{tk, "error parsing tls config, expected 'verify' to be a boolean"}
}
tc.Verify = verify
case "cipher_suites":
ra := mv.([]interface{})
if len(ra) == 0 {
				return nil, &configErr{tk, "error parsing tls config, 'cipher_suites' cannot be empty"}
}
tc.Ciphers = make([]uint16, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r)
cipher, err := parseCipher(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.Ciphers = append(tc.Ciphers, cipher)
}
case "curve_preferences":
ra := mv.([]interface{})
if len(ra) == 0 {
				return nil, &configErr{tk, "error parsing tls config, 'curve_preferences' cannot be empty"}
}
tc.CurvePreferences = make([]tls.CurveID, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r)
cps, err := parseCurvePreferences(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.CurvePreferences = append(tc.CurvePreferences, cps)
}
case "timeout":
at := float64(0)
switch mv.(type) {
case int64:
at = float64(mv.(int64))
case float64:
at = mv.(float64)
}
tc.Timeout = at
default:
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, unknown field [%q]", mk)}
}
}
// If cipher suites were not specified then use the defaults
if tc.Ciphers == nil {
tc.Ciphers = defaultCipherSuites()
}
// If curve preferences were not specified, then use the defaults
if tc.CurvePreferences == nil {
tc.CurvePreferences = defaultCurvePreferences()
}
return &tc, nil
}
// GenTLSConfig loads TLS related configuration parameters.
func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
// Now load in cert and private key
cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile)
if err != nil {
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("error parsing certificate: %v", err)
}
// Create the tls.Config from our options.
// We will determine the cipher suites that we prefer.
// FIXME(dlc) change if ARM based.
config := tls.Config{
MinVersion: tls.VersionTLS12,
CipherSuites: tc.Ciphers,
PreferServerCipherSuites: true,
CurvePreferences: tc.CurvePreferences,
Certificates: []tls.Certificate{cert},
}
// Require client certificates as needed
if tc.Verify {
config.ClientAuth = tls.RequireAndVerifyClientCert
}
// Add in CAs if applicable.
if tc.CaFile != "" {
rootPEM, err := ioutil.ReadFile(tc.CaFile)
if err != nil || rootPEM == nil {
return nil, err
}
pool := x509.NewCertPool()
ok := pool.AppendCertsFromPEM(rootPEM)
if !ok {
return nil, fmt.Errorf("failed to parse root ca certificate")
}
config.ClientCAs = pool
}
return &config, nil
}
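// Hedged usage sketch for GenTLSConfig (file paths are illustrative):
//
//	tc := &TLSConfigOpts{CertFile: "server-cert.pem", KeyFile: "server-key.pem", Verify: true}
//	cfg, err := GenTLSConfig(tc)
//	// On success, cfg.ClientAuth == tls.RequireAndVerifyClientCert because Verify is set.
//	// Note that parseTLS, not this function, fills in the default cipher suites and curves.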
// MergeOptions will merge two options giving preference to the flagOpts
// if the item is present.
func MergeOptions(fileOpts, flagOpts *Options) *Options {
if fileOpts == nil {
return flagOpts
}
if flagOpts == nil {
return fileOpts
}
// Merge the two, flagOpts override
opts := *fileOpts
if flagOpts.Port != 0 {
opts.Port = flagOpts.Port
}
if flagOpts.Host != "" {
opts.Host = flagOpts.Host
}
if flagOpts.ClientAdvertise != "" {
opts.ClientAdvertise = flagOpts.ClientAdvertise
}
if flagOpts.Username != "" {
opts.Username = flagOpts.Username
}
if flagOpts.Password != "" {
opts.Password = flagOpts.Password
}
if flagOpts.Authorization != "" {
opts.Authorization = flagOpts.Authorization
}
if flagOpts.HTTPPort != 0 {
opts.HTTPPort = flagOpts.HTTPPort
}
if flagOpts.Debug {
opts.Debug = true
}
if flagOpts.Trace {
opts.Trace = true
}
if flagOpts.Logtime {
opts.Logtime = true
}
if flagOpts.LogFile != "" {
opts.LogFile = flagOpts.LogFile
}
if flagOpts.PidFile != "" {
opts.PidFile = flagOpts.PidFile
}
if flagOpts.PortsFileDir != "" {
opts.PortsFileDir = flagOpts.PortsFileDir
}
if flagOpts.ProfPort != 0 {
opts.ProfPort = flagOpts.ProfPort
}
if flagOpts.Cluster.ListenStr != "" {
opts.Cluster.ListenStr = flagOpts.Cluster.ListenStr
}
if flagOpts.Cluster.NoAdvertise {
opts.Cluster.NoAdvertise = true
}
if flagOpts.Cluster.ConnectRetries != 0 {
opts.Cluster.ConnectRetries = flagOpts.Cluster.ConnectRetries
}
if flagOpts.Cluster.Advertise != "" {
opts.Cluster.Advertise = flagOpts.Cluster.Advertise
}
if flagOpts.RoutesStr != "" {
mergeRoutes(&opts, flagOpts)
}
return &opts
}
// RoutesFromStr parses route URLs from a string
func RoutesFromStr(routesStr string) []*url.URL {
routes := strings.Split(routesStr, ",")
if len(routes) == 0 {
return nil
}
routeUrls := []*url.URL{}
for _, r := range routes {
r = strings.TrimSpace(r)
u, _ := url.Parse(r)
routeUrls = append(routeUrls, u)
}
return routeUrls
}
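// Hedged usage sketch for RoutesFromStr (the route URLs are illustrative):
//
//	routes := RoutesFromStr("nats://10.0.0.1:6222, nats://10.0.0.2:6222")
//	// routes[0].Host == "10.0.0.1:6222"; whitespace around each entry is trimmed.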
// This will merge the flag routes and override anything that was present.
func mergeRoutes(opts, flagOpts *Options) {
routeUrls := RoutesFromStr(flagOpts.RoutesStr)
if routeUrls == nil {
return
}
opts.Routes = routeUrls
opts.RoutesStr = flagOpts.RoutesStr
}
// RemoveSelfReference removes this server from an array of routes
func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) {
var cleanRoutes []*url.URL
cport := strconv.Itoa(clusterPort)
selfIPs, err := getInterfaceIPs()
if err != nil {
return nil, err
}
for _, r := range routes {
host, port, err := net.SplitHostPort(r.Host)
if err != nil {
return nil, err
}
ipList, err := getURLIP(host)
if err != nil {
return nil, err
}
if cport == port && isIPInList(selfIPs, ipList) {
continue
}
cleanRoutes = append(cleanRoutes, r)
}
return cleanRoutes, nil
}
func isIPInList(list1 []net.IP, list2 []net.IP) bool {
for _, ip1 := range list1 {
for _, ip2 := range list2 {
if ip1.Equal(ip2) {
return true
}
}
}
return false
}
func getURLIP(ipStr string) ([]net.IP, error) {
ipList := []net.IP{}
ip := net.ParseIP(ipStr)
if ip != nil {
ipList = append(ipList, ip)
return ipList, nil
}
hostAddr, err := net.LookupHost(ipStr)
if err != nil {
return nil, fmt.Errorf("Error looking up host with route hostname: %v", err)
}
for _, addr := range hostAddr {
ip = net.ParseIP(addr)
if ip != nil {
ipList = append(ipList, ip)
}
}
return ipList, nil
}
func getInterfaceIPs() ([]net.IP, error) {
var localIPs []net.IP
interfaceAddr, err := net.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("Error getting self referencing address: %v", err)
}
for i := 0; i < len(interfaceAddr); i++ {
interfaceIP, _, _ := net.ParseCIDR(interfaceAddr[i].String())
if net.ParseIP(interfaceIP.String()) != nil {
localIPs = append(localIPs, interfaceIP)
} else {
			return nil, fmt.Errorf("Error parsing self referencing address: %q", interfaceAddr[i].String())
}
}
return localIPs, nil
}
func processOptions(opts *Options) {
// Setup non-standard Go defaults
if opts.Host == "" {
opts.Host = DEFAULT_HOST
}
if opts.HTTPHost == "" {
// Default to same bind from server if left undefined
opts.HTTPHost = opts.Host
}
if opts.Port == 0 {
opts.Port = DEFAULT_PORT
} else if opts.Port == RANDOM_PORT {
// Choose randomly inside of net.Listen
opts.Port = 0
}
if opts.MaxConn == 0 {
opts.MaxConn = DEFAULT_MAX_CONNECTIONS
}
if opts.PingInterval == 0 {
opts.PingInterval = DEFAULT_PING_INTERVAL
}
if opts.MaxPingsOut == 0 {
opts.MaxPingsOut = DEFAULT_PING_MAX_OUT
}
if opts.TLSTimeout == 0 {
opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.AuthTimeout == 0 {
opts.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
}
if opts.Cluster.Port != 0 {
if opts.Cluster.Host == "" {
opts.Cluster.Host = DEFAULT_HOST
}
if opts.Cluster.TLSTimeout == 0 {
opts.Cluster.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Cluster.AuthTimeout == 0 {
opts.Cluster.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
}
}
if opts.MaxControlLine == 0 {
opts.MaxControlLine = MAX_CONTROL_LINE_SIZE
}
if opts.MaxPayload == 0 {
opts.MaxPayload = MAX_PAYLOAD_SIZE
}
if opts.MaxPending == 0 {
opts.MaxPending = MAX_PENDING_SIZE
}
if opts.WriteDeadline == time.Duration(0) {
opts.WriteDeadline = DEFAULT_FLUSH_DEADLINE
}
if opts.RQSubsSweep == time.Duration(0) {
opts.RQSubsSweep = DEFAULT_REMOTE_QSUBS_SWEEPER
}
if opts.MaxClosedClients == 0 {
opts.MaxClosedClients = DEFAULT_MAX_CLOSED_CLIENTS
}
if opts.LameDuckDuration == 0 {
opts.LameDuckDuration = DEFAULT_LAME_DUCK_DURATION
}
if opts.Gateway.Port != 0 {
if opts.Gateway.Host == "" {
opts.Gateway.Host = DEFAULT_HOST
}
}
}
// ConfigureOptions accepts a flag set and augments it with NATS Server
// specific flags. On success, an options structure is returned, configured
// based on the selected flags and/or configuration file.
// The command line options take precedence over the ones in the configuration file.
func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, printTLSHelp func()) (*Options, error) {
opts := &Options{}
var (
showVersion bool
showHelp bool
showTLSHelp bool
signal string
configFile string
err error
)
fs.BoolVar(&showHelp, "h", false, "Show this message.")
fs.BoolVar(&showHelp, "help", false, "Show this message.")
fs.IntVar(&opts.Port, "port", 0, "Port to listen on.")
fs.IntVar(&opts.Port, "p", 0, "Port to listen on.")
fs.StringVar(&opts.Host, "addr", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "a", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "net", "", "Network host to listen on.")
fs.StringVar(&opts.ClientAdvertise, "client_advertise", "", "Client URL to advertise to other servers.")
fs.BoolVar(&opts.Debug, "D", false, "Enable Debug logging.")
fs.BoolVar(&opts.Debug, "debug", false, "Enable Debug logging.")
fs.BoolVar(&opts.Trace, "V", false, "Enable Trace logging.")
fs.BoolVar(&opts.Trace, "trace", false, "Enable Trace logging.")
fs.Bool("DV", false, "Enable Debug and Trace logging.")
fs.BoolVar(&opts.Logtime, "T", true, "Timestamp log entries.")
fs.BoolVar(&opts.Logtime, "logtime", true, "Timestamp log entries.")
fs.StringVar(&opts.Username, "user", "", "Username required for connection.")
fs.StringVar(&opts.Password, "pass", "", "Password required for connection.")
fs.StringVar(&opts.Authorization, "auth", "", "Authorization token required for connection.")
fs.IntVar(&opts.HTTPPort, "m", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPPort, "http_port", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "ms", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "https_port", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.StringVar(&configFile, "c", "", "Configuration file.")
fs.StringVar(&configFile, "config", "", "Configuration file.")
fs.BoolVar(&opts.CheckConfig, "t", false, "Check configuration and exit.")
fs.StringVar(&signal, "sl", "", "Send signal to gnatsd process (stop, quit, reopen, reload)")
fs.StringVar(&signal, "signal", "", "Send signal to gnatsd process (stop, quit, reopen, reload)")
fs.StringVar(&opts.PidFile, "P", "", "File to store process pid.")
fs.StringVar(&opts.PidFile, "pid", "", "File to store process pid.")
fs.StringVar(&opts.PortsFileDir, "ports_file_dir", "", "Creates a ports file in the specified directory (<executable_name>_<pid>.ports)")
fs.StringVar(&opts.LogFile, "l", "", "File to store logging output.")
fs.StringVar(&opts.LogFile, "log", "", "File to store logging output.")
fs.BoolVar(&opts.Syslog, "s", false, "Enable syslog as log method.")
	fs.BoolVar(&opts.Syslog, "syslog", false, "Enable syslog as log method.")
fs.StringVar(&opts.RemoteSyslog, "r", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.StringVar(&opts.RemoteSyslog, "remote_syslog", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.BoolVar(&showVersion, "version", false, "Print version information.")
fs.BoolVar(&showVersion, "v", false, "Print version information.")
fs.IntVar(&opts.ProfPort, "profile", 0, "Profiling HTTP port")
fs.StringVar(&opts.RoutesStr, "routes", "", "Routes to actively solicit a connection.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster_listen", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.Advertise, "cluster_advertise", "", "Cluster URL to advertise to other servers.")
	fs.BoolVar(&opts.Cluster.NoAdvertise, "no_advertise", false, "Do not advertise known cluster IPs to clients.")
fs.IntVar(&opts.Cluster.ConnectRetries, "connect_retries", 0, "For implicit routes, number of connect retries")
fs.BoolVar(&showTLSHelp, "help_tls", false, "TLS help.")
fs.BoolVar(&opts.TLS, "tls", false, "Enable TLS.")
fs.BoolVar(&opts.TLSVerify, "tlsverify", false, "Enable TLS with client verification.")
fs.StringVar(&opts.TLSCert, "tlscert", "", "Server certificate file.")
fs.StringVar(&opts.TLSKey, "tlskey", "", "Private key for server certificate.")
fs.StringVar(&opts.TLSCaCert, "tlscacert", "", "Client certificate CA for verification.")
// The flags definition above set "default" values to some of the options.
// Calling Parse() here will override the default options with any value
// specified from the command line. This is ok. We will then update the
// options with the content of the configuration file (if present), and then,
// call Parse() again to override the default+config with command line values.
	// Calling Parse() before processing the config file is necessary since configFile
	// itself is a command line argument, and Parse() is also required in order
	// to know if the user simply wants to show "help" or "version", etc...
if err := fs.Parse(args); err != nil {
return nil, err
}
if showVersion {
printVersion()
return nil, nil
}
if showHelp {
printHelp()
return nil, nil
}
if showTLSHelp {
printTLSHelp()
return nil, nil
}
// Process args looking for non-flag options,
// 'version' and 'help' only for now
showVersion, showHelp, err = ProcessCommandLineArgs(fs)
if err != nil {
return nil, err
} else if showVersion {
printVersion()
return nil, nil
} else if showHelp {
printHelp()
return nil, nil
}
// Snapshot flag options.
FlagSnapshot = opts.Clone()
// Process signal control.
if signal != "" {
if err := processSignal(signal); err != nil {
return nil, err
}
}
// Parse config if given
if configFile != "" {
// This will update the options with values from the config file.
err := opts.ProcessConfigFile(configFile)
if err != nil {
if opts.CheckConfig {
return nil, err
}
			// If there are only warnings, we can still continue.
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) == 0 {
fmt.Fprint(os.Stderr, err)
return opts, nil
}
return nil, err
} else if opts.CheckConfig {
// Report configuration file syntax test was successful and exit.
return opts, nil
}
// Call this again to override config file options with options from command line.
// Note: We don't need to check error here since if there was an error, it would
// have been caught the first time this function was called (after setting up the
// flags).
fs.Parse(args)
} else if opts.CheckConfig {
return nil, fmt.Errorf("must specify [-c, --config] option to check configuration file syntax")
}
// Special handling of some flags
var (
flagErr error
tlsDisabled bool
tlsOverride bool
)
fs.Visit(func(f *flag.Flag) {
// short-circuit if an error was encountered
if flagErr != nil {
return
}
if strings.HasPrefix(f.Name, "tls") {
if f.Name == "tls" {
if !opts.TLS {
// User has specified "-tls=false", we need to disable TLS
opts.TLSConfig = nil
tlsDisabled = true
tlsOverride = false
return
}
tlsOverride = true
} else if !tlsDisabled {
tlsOverride = true
}
} else {
switch f.Name {
case "DV":
// Check value to support -DV=false
boolValue, _ := strconv.ParseBool(f.Value.String())
opts.Trace, opts.Debug = boolValue, boolValue
case "cluster", "cluster_listen":
// Override cluster config if explicitly set via flags.
flagErr = overrideCluster(opts)
case "routes":
// Keep in mind that the flag has updated opts.RoutesStr at this point.
if opts.RoutesStr == "" {
// Set routes array to nil since routes string is empty
opts.Routes = nil
return
}
routeUrls := RoutesFromStr(opts.RoutesStr)
opts.Routes = routeUrls
}
}
})
if flagErr != nil {
return nil, flagErr
}
// This will be true if some of the `-tls` params have been set and
// `-tls=false` has not been set.
if tlsOverride {
if err := overrideTLS(opts); err != nil {
return nil, err
}
}
// If we don't have cluster defined in the configuration
// file and no cluster listen string override, but we do
// have a routes override, we need to report misconfiguration.
if opts.RoutesStr != "" && opts.Cluster.ListenStr == "" && opts.Cluster.Host == "" && opts.Cluster.Port == 0 {
return nil, errors.New("solicited routes require cluster capabilities, e.g. --cluster")
}
return opts, nil
}
// overrideTLS is called when at least "-tls=true" has been set.
func overrideTLS(opts *Options) error {
if opts.TLSCert == "" {
return errors.New("TLS Server certificate must be present and valid")
}
if opts.TLSKey == "" {
return errors.New("TLS Server private key must be present and valid")
}
tc := TLSConfigOpts{}
tc.CertFile = opts.TLSCert
tc.KeyFile = opts.TLSKey
tc.CaFile = opts.TLSCaCert
tc.Verify = opts.TLSVerify
var err error
opts.TLSConfig, err = GenTLSConfig(&tc)
return err
}
// overrideCluster updates Options.Cluster if the flag "cluster" (or "cluster_listen")
// has been explicitly set on the command line. If it is set to the empty string,
// it will clear the Cluster options.
func overrideCluster(opts *Options) error {
if opts.Cluster.ListenStr == "" {
// This one is enough to disable clustering.
opts.Cluster.Port = 0
return nil
}
clusterURL, err := url.Parse(opts.Cluster.ListenStr)
if err != nil {
return err
}
h, p, err := net.SplitHostPort(clusterURL.Host)
if err != nil {
return err
}
opts.Cluster.Host = h
_, err = fmt.Sscan(p, &opts.Cluster.Port)
if err != nil {
return err
}
if clusterURL.User != nil {
pass, hasPassword := clusterURL.User.Password()
if !hasPassword {
return errors.New("expected cluster password to be set")
}
opts.Cluster.Password = pass
user := clusterURL.User.Username()
opts.Cluster.Username = user
} else {
// Since we override from flag and there is no user/pwd, make
// sure we clear what we may have gotten from config file.
opts.Cluster.Username = ""
opts.Cluster.Password = ""
}
return nil
}
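// Hedged example of a ListenStr handled by overrideCluster (values illustrative):
//
//	opts.Cluster.ListenStr = "nats://admin:s3cret@0.0.0.0:6222"
//	_ = overrideCluster(opts) // Host "0.0.0.0", Port 6222, user/password from the URL.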
func processSignal(signal string) error {
var (
pid string
commandAndPid = strings.Split(signal, "=")
)
if l := len(commandAndPid); l == 2 {
pid = commandAndPid[1]
} else if l > 2 {
return fmt.Errorf("invalid signal parameters: %v", commandAndPid[2:])
}
if err := ProcessSignal(Command(commandAndPid[0]), pid); err != nil {
return err
}
os.Exit(0)
return nil
}
| 1 | 8,369 | We could probably do 1 sec if we have time, and hence exit early if we have small number of clients. So LB = 60 sec. Grace = 10secs. So 50Sec window, if NumClients < 50, Close 1 every second. Then exit when NumClients == 0. | nats-io-nats-server | go |
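The review note above sketches the pacing for lame duck mode: with a 60s LameDuckDuration and a 10s grace period, client closes are spread over the remaining 50s window, capped at one per second when there are fewer than 50 clients, exiting as soon as none remain. Below is a minimal Go sketch of that idea; the server type and its getClients/closeClient helpers are hypothetical stand-ins, not the actual nats-server API.

// lameDuckClose is a hedged sketch of the pacing described in the note above.
func (s *server) lameDuckClose(dur, grace time.Duration) {
	clients := s.getClients() // hypothetical helper returning current clients
	n := len(clients)
	if n == 0 {
		return
	}
	// Spread the closes over the window left after the grace period.
	interval := (dur - grace) / time.Duration(n)
	// With few clients (fewer than 50 in a 50s window), cap the pace at one
	// close per second; we then finish, and exit, before the window ends.
	if interval > time.Second {
		interval = time.Second
	}
	if interval <= 0 {
		interval = time.Millisecond // guard against a very large client count
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for _, c := range clients {
		<-ticker.C
		s.closeClient(c) // hypothetical helper closing one client connection
	}
}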
@@ -75,7 +75,7 @@ def combine_frames(this, *args, how="full"):
# If the same named index is found, that's used.
for this_column, this_name in this_index_map:
for that_col, that_name in that_index_map:
- if this_name == that_name:
+ if (this_column == that_col) and (this_name == that_name):
# We should merge the Spark columns into one
# to mimic pandas' behavior.
this_scol = this._internal.scol_for(this_column) | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Commonly used utils in Koalas.
"""
import functools
from collections import OrderedDict
from distutils.version import LooseVersion
from typing import Callable, Dict, List, Tuple, Union
import pyarrow
import pyspark
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import FloatType
import pandas as pd
from pandas.api.types import is_list_like
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
def combine_frames(this, *args, how="full"):
"""
This method combines `this` DataFrame with a different `that` DataFrame or
Series from a different DataFrame.
    It returns a DataFrame that has the prefixes `this_` and `that_` to distinguish
    the column names from both DataFrames.
It internally performs a join operation which can be expensive in general.
    So, if the `compute.ops_on_diff_frames` option is False,
this method throws an exception.
"""
from databricks.koalas import Series
from databricks.koalas import DataFrame
from databricks.koalas.config import get_option
if all(isinstance(arg, Series) for arg in args):
assert all(arg._kdf is args[0]._kdf for arg in args), \
"Currently only one different DataFrame (from given Series) is supported"
if this is args[0]._kdf:
            return  # We don't need to combine. All Series are in this.
that = args[0]._kdf[list(args)]
elif len(args) == 1 and isinstance(args[0], DataFrame):
assert isinstance(args[0], DataFrame)
if this is args[0]:
            return  # We don't need to combine. `this` and `that` are the same.
that = args[0]
else:
raise AssertionError("args should be single DataFrame or "
"single/multiple Series")
if get_option("compute.ops_on_diff_frames"):
this_index_map = this._internal.index_map
that_index_map = that._internal.index_map
assert len(this_index_map) == len(that_index_map)
join_scols = []
merged_index_scols = []
# If the same named index is found, that's used.
for this_column, this_name in this_index_map:
for that_col, that_name in that_index_map:
if this_name == that_name:
# We should merge the Spark columns into one
# to mimic pandas' behavior.
this_scol = this._internal.scol_for(this_column)
that_scol = that._internal.scol_for(that_col)
join_scol = this_scol == that_scol
join_scols.append(join_scol)
merged_index_scols.append(
F.when(
this_scol.isNotNull(), this_scol
).otherwise(that_scol).alias(this_column))
break
else:
raise ValueError("Index names must be exactly matched currently.")
assert len(join_scols) > 0, "cannot join with no overlapping index names"
joined_df = this._sdf.alias("this").join(
that._sdf.alias("that"), on=join_scols, how=how)
joined_df = joined_df.select(
merged_index_scols +
[this[idx]._scol.alias("__this_%s" % this._internal.column_name_for(idx))
for idx in this._internal.column_index] +
[that[idx]._scol.alias("__that_%s" % that._internal.column_name_for(idx))
for idx in that._internal.column_index])
index_columns = set(this._internal.index_columns)
new_data_columns = [c for c in joined_df.columns if c not in index_columns]
level = max(this._internal.column_index_level, that._internal.column_index_level)
column_index = ([tuple(['this'] + ([''] * (level - len(idx))) + list(idx))
for idx in this._internal.column_index]
+ [tuple(['that'] + ([''] * (level - len(idx))) + list(idx))
for idx in that._internal.column_index])
column_index_names = ((([None] * (1 + level - len(this._internal.column_index_level)))
+ this._internal.column_index_names)
if this._internal.column_index_names is not None else None)
return DataFrame(
this._internal.copy(sdf=joined_df,
column_index=column_index,
column_scols=[scol_for(joined_df, col) for col in new_data_columns],
column_index_names=column_index_names))
else:
raise ValueError(
"Cannot combine the series or dataframe because it comes from a different dataframe. "
"In order to allow this operation, enable 'compute.ops_on_diff_frames' option.")
def align_diff_frames(resolve_func, this, that, fillna=True, how="full"):
"""
This method aligns two different DataFrames with a given `func`. Columns are resolved and
handled within the given `func`.
To use this, `compute.ops_on_diff_frames` should be True, for now.
:param resolve_func: Takes aligned (joined) DataFrame, the column of the current DataFrame, and
the column of another DataFrame. It returns an iterable that produces Series.
>>> from databricks.koalas.config import set_option, reset_option
>>>
>>> set_option("compute.ops_on_diff_frames", True)
>>>
>>> kdf1 = ks.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>> kdf2 = ks.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>>
>>> def func(kdf, this_column_index, that_column_index):
... kdf # conceptually this is A + B.
...
... # Within this function, Series from A or B can be performed against `kdf`.
... this_idx = this_column_index[0] # this is ('a',) from kdf1.
... that_idx = that_column_index[0] # this is ('a',) from kdf2.
... new_series = (kdf[this_idx] - kdf[that_idx]).rename(str(this_idx))
...
... # This new series will be placed in new DataFrame.
... yield (new_series, this_idx)
>>>
>>>
>>> align_diff_frames(func, kdf1, kdf2).sort_index()
a
0 0
1 0
2 0
3 0
4 0
5 0
6 0
7 0
8 0
>>> reset_option("compute.ops_on_diff_frames")
:param this: a DataFrame to align
:param that: another DataFrame to align
:param fillna: If True, it fills missing values in non-common columns in both `this` and `that`.
Otherwise, it returns as are.
:param how: join way. In addition, it affects how `resolve_func` resolves the column conflict.
- full: `resolve_func` should resolve only common columns from 'this' and 'that' DataFrames.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` and
'that_columns' in this function are B, C and B, C.
- left: `resolve_func` should resolve columns including that columns.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` is
B, C but `that_columns` are B, C, D.
:return: Aligned DataFrame
"""
assert how == "full" or how == "left"
this_column_index = this._internal.column_index
that_column_index = that._internal.column_index
common_column_index = set(this_column_index).intersection(that_column_index)
# 1. Full outer join given two dataframes.
combined = combine_frames(this, that, how=how)
# 2. Apply given function to transform the columns in a batch and keep the new columns.
combined_column_index = combined._internal.column_index
that_columns_to_apply = []
this_columns_to_apply = []
additional_that_columns = []
columns_to_keep = []
column_index_to_keep = []
for combined_idx in combined_column_index:
for common_idx in common_column_index:
if combined_idx == tuple(['this', *common_idx]):
this_columns_to_apply.append(combined_idx)
break
elif combined_idx == tuple(['that', *common_idx]):
that_columns_to_apply.append(combined_idx)
break
else:
if how == "left" and \
combined_idx in [tuple(['that', *idx]) for idx in that_column_index]:
# In this case, we will drop `that_columns` in `columns_to_keep` but passes
# it later to `func`. `func` should resolve it.
# Note that adding this into a separate list (`additional_that_columns`)
# is intentional so that `this_columns` and `that_columns` can be paired.
additional_that_columns.append(combined_idx)
elif fillna:
columns_to_keep.append(F.lit(None).cast(FloatType()).alias(str(combined_idx)))
column_index_to_keep.append(combined_idx)
else:
columns_to_keep.append(combined._internal.scol_for(combined_idx))
column_index_to_keep.append(combined_idx)
that_columns_to_apply += additional_that_columns
# Should extract columns to apply and do it in a batch in case
# it adds new columns for example.
if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0:
kser_set, column_index_applied = \
zip(*resolve_func(combined, this_columns_to_apply, that_columns_to_apply))
columns_applied = [c._scol for c in kser_set]
column_index_applied = list(column_index_applied)
else:
columns_applied = []
column_index_applied = []
applied = combined[columns_applied + columns_to_keep]
applied.columns = pd.MultiIndex.from_tuples(column_index_applied + column_index_to_keep)
# 3. Restore the names back and deduplicate columns.
this_idxes = OrderedDict()
# Add columns in the order of the original frame.
for this_idx in this_column_index:
for new_idx in applied._internal.column_index:
if new_idx[1:] not in this_idxes and this_idx == new_idx[1:]:
this_idxes[new_idx[1:]] = new_idx
# After that, we will add the remaining columns.
other_idxes = OrderedDict()
for new_idx in applied._internal.column_index:
if new_idx[1:] not in this_idxes:
other_idxes[new_idx[1:]] = new_idx
kdf = applied[list(this_idxes.values()) + list(other_idxes.values())]
kdf.columns = kdf.columns.droplevel()
return kdf
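# A minimal sketch of the `how="left"` case described in the docstring above.
# The frames below are hypothetical, not taken from the real tests:
#
#   kdf1 = ks.DataFrame({'a': [1, 2], 'b': [3, 4]})   # 'this' has columns a, b
#   kdf2 = ks.DataFrame({'b': [5, 6], 'c': [7, 8]})   # 'that' has columns b, c
#
# With align_diff_frames(func, kdf1, kdf2, how="left"), `func` would be called
# with this_column_index == [('this', 'b')] and
# that_column_index == [('that', 'b'), ('that', 'c')], so `func` itself has to
# decide what to yield for the 'that'-only column ('that', 'c'); column 'a' is
# kept as-is, or filled with nulls when fillna is True.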
def align_diff_series(func, this_series, *args, how="full"):
from databricks.koalas.base import IndexOpsMixin
from databricks.koalas.series import Series
cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]
combined = combine_frames(this_series.to_frame(), *cols, how=how)
that_columns = [combined['that'][arg._internal.column_index[0]]._scol
if isinstance(arg, IndexOpsMixin) else arg for arg in args]
scol = func(combined['this'][this_series._internal.column_index[0]]._scol,
*that_columns)
return Series(combined._internal.copy(scol=scol,
column_index=this_series._internal.column_index),
anchor=combined)
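# A hypothetical usage sketch (not one of the real call sites): a binary
# operation between Series anchored to different DataFrames can be routed
# through align_diff_series by passing a function over Spark Columns, e.g.:
#
#   diff = align_diff_series(lambda scol, other: scol - other, kser1, kser2)
#
# Here `kser1` and `kser2` are assumed to be Koalas Series backed by different
# DataFrames, with `compute.ops_on_diff_frames` enabled so combine_frames can
# join them.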
def default_session(conf=None):
if conf is None:
conf = dict()
if LooseVersion(pyarrow.__version__) >= LooseVersion("0.15") and \
LooseVersion(pyspark.__version__) < LooseVersion("3.0"):
conf["spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT"] = "1"
conf["spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT"] = "1"
conf["spark.mesos.driverEnv.ARROW_PRE_0_15_IPC_FORMAT"] = "1"
conf["spark.kubernetes.driverEnv.ARROW_PRE_0_15_IPC_FORMAT"] = "1"
builder = spark.SparkSession.builder.appName("Koalas")
for key, value in conf.items():
builder = builder.config(key, value)
return builder.getOrCreate()
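# Illustrative usage (the configuration key below is just an example):
#
#   spark_session = default_session({"spark.sql.shuffle.partitions": "8"})
#
# Entries in `conf` are applied to the builder before getOrCreate(), and the
# ARROW_PRE_0_15_IPC_FORMAT environment variables above are injected
# automatically when pyarrow>=0.15 is used together with pyspark<3.0.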
def validate_arguments_and_invoke_function(pobj: Union[pd.DataFrame, pd.Series],
koalas_func: Callable, pandas_func: Callable,
input_args: Dict):
"""
Invokes a pandas function.
This is created because different versions of pandas support different parameters, and as a
result when we code against the latest version, our users might get a confusing
"got an unexpected keyword argument" error if they are using an older version of pandas.
This function validates all the arguments, and removes the ones that are not supported if they
are simply set to the default value (i.e. the user most likely didn't explicitly specify them). It
throws a TypeError if the user explicitly specifies an argument that is not supported by the
available pandas version.
For example usage, look at DataFrame.to_html().
:param pobj: the pandas DataFrame or Series to operate on
:param koalas_func: koalas function, used to get default parameter values
:param pandas_func: pandas function, used to check whether pandas supports all the arguments
:param input_args: arguments to pass to the pandas function, often created by using locals().
Make sure the locals() call is at the top of the function so it captures only
input parameters, rather than local variables.
:return: whatever pandas_func returns
"""
import inspect
# Make a copy since whatever is passed in is likely created by locals(), and we can't delete
# the 'self' key from that.
args = input_args.copy()
del args['self']
if 'kwargs' in args:
# explode kwargs
kwargs = args['kwargs']
del args['kwargs']
args = {**args, **kwargs}
koalas_params = inspect.signature(koalas_func).parameters
pandas_params = inspect.signature(pandas_func).parameters
for param in koalas_params.values():
if param.name not in pandas_params:
if args[param.name] == param.default:
del args[param.name]
else:
raise TypeError(
("The pandas version [%s] available does not support parameter '%s' " +
"for function '%s'.") % (pd.__version__, param.name, pandas_func.__name__))
args['self'] = pobj
return pandas_func(**args)
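# A hypothetical caller sketch mirroring the pattern the docstring describes;
# the method body below is illustrative, not the actual koalas implementation
# of DataFrame.to_html:
#
#   def to_html(self, buf=None, columns=None, max_rows=None):
#       args = locals()  # capture input parameters first, as noted above
#       return validate_arguments_and_invoke_function(
#           self.to_pandas(), self.to_html, pd.DataFrame.to_html, args)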
def lazy_property(fn):
"""
Decorator that makes a property lazy-evaluated.
Copied from https://stevenloria.com/lazy-properties/
"""
attr_name = '_lazy_' + fn.__name__
@property
@functools.wraps(fn)
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
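# Illustrative usage (the class and method names are made up):
#
#   class Report(object):
#       @lazy_property
#       def summary(self):
#           return expensive_computation()
#
# The first access to `report.summary` runs the function once and caches the
# result on the instance under `_lazy_summary`; later accesses reuse it.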
def scol_for(sdf: spark.DataFrame, column_name: str) -> spark.Column:
""" Return Spark Column for the given column name. """
return sdf['`{}`'.format(column_name)]
def column_index_level(column_index: List[Tuple[str, ...]]) -> int:
""" Return the level of the column index. """
if len(column_index) == 0:
return 0
else:
levels = set(0 if idx is None else len(idx) for idx in column_index)
assert len(levels) == 1, levels
return list(levels)[0]
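# For example, following the logic above, column_index_level([('a', 'x'),
# ('b', 'y')]) returns 2, column_index_level([]) returns 0, and mixed tuple
# lengths would fail the assertion.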
def name_like_string(name: Union[str, Tuple]) -> str:
"""
Return a name-like string from a str or a tuple of str
Examples
--------
>>> name = 'abc'
>>> name_like_string(name)
'abc'
>>> name = ('abc',)
>>> name_like_string(name)
'abc'
>>> name = ('a', 'b', 'c')
>>> name_like_string(name)
'(a, b, c)'
"""
if is_list_like(name):
name = tuple([str(n) for n in name])
else:
name = (str(name),)
return ('(%s)' % ', '.join(name)) if len(name) > 1 else name[0]
def validate_axis(axis=0, none_axis=0):
""" Check the given axis is valid. """
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: none_axis, 'index': 0, 'columns': 1}.get(axis, axis)
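# Illustrative behavior, following the mapping above:
#   validate_axis('index') -> 0
#   validate_axis('columns') -> 1
#   validate_axis(None, none_axis=1) -> 1
#   validate_axis(2) raises ValueError("No axis named 2")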
| 1 | 13,295 | can you rename `that_col` to `that_column`? | databricks-koalas | py |
@@ -19,7 +19,9 @@
/**
* Internal dependencies
*/
-import Widgets from './googlesitekit/widgets';
+import { Widget } from './googlesitekit/widgets';
+
+const Widgets = { componenets: Widget };
if ( typeof global.googlesitekit === 'undefined' ) {
global.googlesitekit = {}; | 1 | /**
* Public Widgets API entrypoint.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import Widgets from './googlesitekit/widgets';
if ( typeof global.googlesitekit === 'undefined' ) {
global.googlesitekit = {};
}
if ( global.googlesitekit.widgets === undefined ) {
global.googlesitekit.widgets = Widgets;
}
// This is only exported for Jest and is not used in production.
export default Widgets;
| 1 | 29,381 | This is not correct, it will override everything else that is currently in `Widgets`. This file shouldn't be modified at all, we should be adding `components.Widget` in `assets/js/googlesitekit/widgets/index.js`, where the rest of that object is defined. Oh, and there's a typo here :) | google-site-kit-wp | js |
@@ -152,6 +152,14 @@ void BalanceTask::invoke() {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Update meta failed, status " << resp;
ret_ = Result::FAILED;
+ } else if (kv_ != nullptr) {
+ if (LastUpdateTimeMan::update(kv_, time::WallClock::fastNowInMilliSec()) !=
+ kvstore::ResultCode::SUCCEEDED) {
+ LOG(INFO) << taskIdStr_ << "Update meta failed";
+ ret_ = Result::FAILED;
+ } else {
+ status_ = Status::REMOVE_PART_ON_SRC;
+ }
} else {
status_ = Status::REMOVE_PART_ON_SRC;
} | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "meta/processors/admin/BalanceTask.h"
#include <folly/synchronization/Baton.h>
#include "meta/processors/Common.h"
namespace nebula {
namespace meta {
#define SAVE_STATE() \
if (!saveInStore()) { \
ret_ = Result::FAILED; \
onError_(); \
return; \
}
const std::string kBalanceTaskTable = "__b_task__"; // NOLINT
void BalanceTask::invoke() {
CHECK_NOTNULL(client_);
if (ret_ == Result::INVALID) {
endTimeMs_ = time::WallClock::fastNowInMilliSec();
saveInStore();
LOG(ERROR) << taskIdStr_ << "Task invalid, status " << static_cast<int32_t>(status_);
onFinished_();
return;
}
if (ret_ == Result::FAILED) {
endTimeMs_ = time::WallClock::fastNowInMilliSec();
saveInStore();
LOG(ERROR) << taskIdStr_ << "Task failed, status " << static_cast<int32_t>(status_);
onError_();
return;
}
switch (status_) {
case Status::START: {
LOG(INFO) << taskIdStr_ << "Start to move part!";
status_ = Status::CHANGE_LEADER;
ret_ = Result::IN_PROGRESS;
startTimeMs_ = time::WallClock::fastNowInMilliSec();
}
// fallthrough
case Status::CHANGE_LEADER: {
LOG(INFO) << taskIdStr_ << "Ask the src to give up the leadership.";
SAVE_STATE();
if (srcLived_) {
client_->transLeader(spaceId_, partId_, src_).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Transfer leader failed, status " << resp;
if (resp == nebula::Status::PartNotFound()) {
ret_ = Result::INVALID;
} else {
ret_ = Result::FAILED;
}
} else {
status_ = Status::ADD_PART_ON_DST;
}
invoke();
});
break;
} else {
LOG(INFO) << taskIdStr_ << "Src host has been lost, so no need to transfer leader";
status_ = Status::ADD_PART_ON_DST;
}
}
// fallthrough
case Status::ADD_PART_ON_DST: {
LOG(INFO) << taskIdStr_ << "Open the part as learner on dst.";
SAVE_STATE();
client_->addPart(spaceId_, partId_, dst_, true).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Open part failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::ADD_LEARNER;
}
invoke();
});
break;
}
case Status::ADD_LEARNER: {
LOG(INFO) << taskIdStr_ << "Add learner dst.";
SAVE_STATE();
client_->addLearner(spaceId_, partId_, dst_).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Add learner failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::CATCH_UP_DATA;
}
invoke();
});
break;
}
case Status::CATCH_UP_DATA: {
LOG(INFO) << taskIdStr_ << "Waiting for the data catch up.";
SAVE_STATE();
client_->waitingForCatchUpData(spaceId_, partId_, dst_).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Catchup data failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::MEMBER_CHANGE_ADD;
}
invoke();
});
break;
}
case Status::MEMBER_CHANGE_ADD: {
LOG(INFO) << taskIdStr_ << "Send member change request to the leader"
<< ", it will add the new member on dst host";
SAVE_STATE();
client_->memberChange(spaceId_, partId_, dst_, true).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Add peer failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::MEMBER_CHANGE_REMOVE;
}
invoke();
});
break;
}
case Status::MEMBER_CHANGE_REMOVE: {
LOG(INFO) << taskIdStr_ << "Send member change request to the leader"
<< ", it will remove the old member on src host";
SAVE_STATE();
client_->memberChange(spaceId_, partId_, src_, false).thenValue(
[this] (auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Remove peer failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::UPDATE_PART_META;
}
invoke();
});
break;
}
case Status::UPDATE_PART_META: {
LOG(INFO) << taskIdStr_ << "Update meta for part.";
SAVE_STATE();
client_->updateMeta(spaceId_, partId_, src_, dst_).thenValue(
[this] (auto&& resp) {
// The callback will be called inside raft set value. So don't call invoke directly
// here.
LOG(INFO) << "Update meta succeeded!";
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Update meta failed, status " << resp;
ret_ = Result::FAILED;
} else {
status_ = Status::REMOVE_PART_ON_SRC;
}
invoke();
});
break;
}
case Status::REMOVE_PART_ON_SRC: {
LOG(INFO) << taskIdStr_ << "Close part on src host, srcLived " << srcLived_;
SAVE_STATE();
if (srcLived_) {
client_->removePart(spaceId_, partId_, src_).thenValue([this](auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Remove part failed, status " << resp;
ret_ = Result::FAILED;
} else {
ret_ = Result::SUCCEEDED;
status_ = Status::CHECK;
}
invoke();
});
break;
} else {
LOG(INFO) << taskIdStr_ << "Don't remove part on src " << src_;
status_ = Status::CHECK;
}
}
// fallthrough
case Status::CHECK: {
LOG(INFO) << taskIdStr_ << "Check the peers...";
SAVE_STATE();
client_->checkPeers(spaceId_, partId_).thenValue([this] (auto&& resp) {
if (!resp.ok()) {
LOG(INFO) << taskIdStr_ << "Check the peers failed, status " << resp;
ret_ = Result::FAILED;
} else {
ret_ = Result::SUCCEEDED;
status_ = Status::END;
}
invoke();
});
break;
}
case Status::END: {
LOG(INFO) << taskIdStr_ << "Part has been moved successfully!";
endTimeMs_ = time::WallClock::fastNowInMilliSec();
SAVE_STATE();
onFinished_();
break;
}
}
return;
}
void BalanceTask::rollback() {
if (status_ < Status::UPDATE_PART_META) {
// TODO(heng): restart the part on its peers.
} else {
// TODO(heng): Go on the task.
}
}
bool BalanceTask::saveInStore() {
if (kv_) {
std::vector<kvstore::KV> data;
data.emplace_back(taskKey(), taskVal());
folly::Baton<true, std::atomic> baton;
bool ret = false;
kv_->asyncMultiPut(kDefaultSpaceId,
kDefaultPartId,
std::move(data),
[this, &ret, &baton] (kvstore::ResultCode code) {
if (kvstore::ResultCode::SUCCEEDED == code) {
ret = true;
} else {
LOG(INFO) << taskIdStr_ << "Can't persist task!";
}
baton.post();
});
baton.wait();
return ret;
}
return true;
}
std::string BalanceTask::taskKey() {
std::string str;
str.reserve(64);
str.append(reinterpret_cast<const char*>(kBalanceTaskTable.data()), kBalanceTaskTable.size());
str.append(reinterpret_cast<const char*>(&balanceId_), sizeof(balanceId_));
str.append(reinterpret_cast<const char*>(&spaceId_), sizeof(spaceId_));
str.append(reinterpret_cast<const char*>(&partId_), sizeof(partId_));
str.append(reinterpret_cast<const char*>(&src_), sizeof(src_));
str.append(reinterpret_cast<const char*>(&dst_), sizeof(dst_));
return str;
}
std::string BalanceTask::taskVal() {
std::string str;
str.reserve(32);
str.append(reinterpret_cast<const char*>(&status_), sizeof(status_));
str.append(reinterpret_cast<const char*>(&ret_), sizeof(ret_));
str.append(reinterpret_cast<const char*>(&srcLived_), sizeof(srcLived_));
str.append(reinterpret_cast<const char*>(&startTimeMs_), sizeof(startTimeMs_));
str.append(reinterpret_cast<const char*>(&endTimeMs_), sizeof(endTimeMs_));
return str;
}
std::string BalanceTask::prefix(BalanceID balanceId) {
std::string str;
str.reserve(32);
str.append(reinterpret_cast<const char*>(kBalanceTaskTable.data()), kBalanceTaskTable.size());
str.append(reinterpret_cast<const char*>(&balanceId), sizeof(balanceId));
return str;
}
std::tuple<BalanceID, GraphSpaceID, PartitionID, HostAddr, HostAddr>
BalanceTask::parseKey(const folly::StringPiece& rawKey) {
int32_t offset = kBalanceTaskTable.size();
auto balanceId = *reinterpret_cast<const BalanceID*>(rawKey.begin() + offset);
offset += sizeof(balanceId);
auto spaceId = *reinterpret_cast<const GraphSpaceID*>(rawKey.begin() + offset);
offset += sizeof(GraphSpaceID);
auto partId = *reinterpret_cast<const PartitionID*>(rawKey.begin() + offset);
offset += sizeof(PartitionID);
auto src = *reinterpret_cast<const HostAddr*>(rawKey.begin() + offset);
offset += sizeof(HostAddr);
auto dst = *reinterpret_cast<const HostAddr*>(rawKey.begin() + offset);
return std::make_tuple(balanceId, spaceId, partId, src, dst);
}
std::tuple<BalanceTask::Status, BalanceTask::Result, bool, int64_t, int64_t>
BalanceTask::parseVal(const folly::StringPiece& rawVal) {
int32_t offset = 0;
auto status = *reinterpret_cast<const BalanceTask::Status*>(rawVal.begin() + offset);
offset += sizeof(BalanceTask::Status);
auto ret = *reinterpret_cast<const BalanceTask::Result*>(rawVal.begin() + offset);
offset += sizeof(BalanceTask::Result);
auto srcLived = *reinterpret_cast<const bool*>(rawVal.begin() + offset);
offset += sizeof(bool);
auto start = *reinterpret_cast<const int64_t*>(rawVal.begin() + offset);
offset += sizeof(int64_t);
auto end = *reinterpret_cast<const int64_t*>(rawVal.begin() + offset);
return std::make_tuple(status, ret, srcLived, start, end);
}
} // namespace meta
} // namespace nebula
| 1 | 28,079 | Each task finished will result in the meta info updated? | vesoft-inc-nebula | cpp |
@@ -88,7 +88,7 @@ class PageSpeedInsightsSetup extends Component {
handleAPIKeyChange( e ) {
this.setState( {
- apikey: e.currentTarget.value,
+ apikey: e.currentTarget.value.replace( /\s/g, '' ),
disabled: 0 === e.currentTarget.value.length,
} );
} | 1 | /**
* PageSpeedInsightsSetup component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import Button from 'GoogleComponents/button';
import Link from 'GoogleComponents/link';
import data from 'GoogleComponents/data';
import PropTypes from 'prop-types';
import { Input, TextField } from 'SiteKitCore/material-components';
import { sendAnalyticsTrackingEvent } from 'GoogleUtil';
import { getSiteKitAdminURL } from 'GoogleUtil';
const { __ } = wp.i18n;
const { Component, Fragment } = wp.element;
class PageSpeedInsightsSetup extends Component {
constructor( props ) {
super( props );
this.state = {
apikey: '',
disabled: true,
};
this.handleAPIKeyChange = this.handleAPIKeyChange.bind( this );
this.handleAPIKeyEntry = this.handleAPIKeyEntry.bind( this );
this.handleEditKeyClick = this.handleEditKeyClick.bind( this );
}
componentDidMount() {
const apikey = googlesitekit.admin.settings ? googlesitekit.admin.settings.apikey : '';
const disabled = ! apikey || 0 === apikey.length;
const editing = disabled;
const startedEmpty = disabled;
// Load the initial value.
this.setState( {
apikey,
disabled,
editing,
startedEmpty,
} );
}
async handleAPIKeyEntry() {
const {
apikey,
startedEmpty,
} = this.state;
sendAnalyticsTrackingEvent( 'plugin_setup', 'apikey_entered' );
// Save the API key.
try {
await data.set( 'core', 'site', 'apikey', { apikey } );
// If the API key was previously unconfigured, continue to the dashboard.
if ( startedEmpty ) {
document.location = getSiteKitAdminURL( 'googlesitekit-dashboard' );
} else {
// Otherwise, end the edit mode.
this.setState( { editing: false } );
}
} catch ( err ) {
throw err;
}
}
handleEditKeyClick() {
this.setState( { editing: true } );
}
handleAPIKeyChange( e ) {
this.setState( {
apikey: e.currentTarget.value,
disabled: 0 === e.currentTarget.value.length,
} );
}
render() {
const { externalAPIKeyURL } = googlesitekit.admin;
const {
apikey,
disabled,
editing,
startedEmpty,
} = this.state;
const {
onSettingsPage,
} = this.props;
const externalAPIKeyURLLabel = 'developers.google.com/web/sitekit/apikey';
return (
<div className="googlesitekit-setup-module googlesitekit-setup-module--pagespeed-insights">
{
! onSettingsPage &&
<Fragment>
<div className="googlesitekit-setup-module__logo">
<img src={ googlesitekit.admin.assetsRoot + 'images/icon-pagespeed.png' } width={ 33 } alt=""/>
</div>
<h2 className="
googlesitekit-heading-3
googlesitekit-setup-module__title
">
{ __( 'PageSpeed Insights', 'google-site-kit' ) }
</h2>
</Fragment>
}
{
! editing ?
<p>
{ __( 'API connected.', 'google-site-kit' ) } <Link
href="#"
onClick={ this.handleEditKeyClick }
inherit
>
{ __( 'Edit key', 'google-site-kit' ) }
</Link>
</p> :
<Fragment>
{ startedEmpty &&
<Fragment>
<p>
{ __( 'Please generate an API key on ', 'google-site-kit' ) }
<Link
href= { externalAPIKeyURL }
target="_blank"
rel="noopener noreferrer"
external
inherit
>
{ externalAPIKeyURLLabel }
</Link>
</p>
<p>{ __( 'Enter it below to complete the setup for PageSpeed Insights.', 'google-site-kit' ) }</p>
</Fragment>
}
<div className="googlesitekit-setup-module__inputs">
<TextField
label={ __( 'API Key', 'google-site-kit' ) }
name="apiKey"
outlined
required
>
<Input
value={ apikey ? apikey : '' }
onChange={ this.handleAPIKeyChange }
autoComplete="off"
/>
</TextField>
</div>
<Button
onClick={ this.handleAPIKeyEntry }
disabled={ disabled }
>
{ startedEmpty ? __( 'Proceed', 'google-site-kit' ) : __( 'Save', 'google-site-kit' ) }
</Button>
</Fragment>
}
</div>
);
}
}
PageSpeedInsightsSetup.propTypes = {
onSettingsPage: PropTypes.bool,
finishSetup: PropTypes.func,
isEditing: PropTypes.bool,
};
PageSpeedInsightsSetup.defaultProps = {
onSettingsPage: true,
isEditing: false,
};
export default PageSpeedInsightsSetup;
| 1 | 24,375 | Quick follow-up @aaemnnosttv, why this and not `.trim()`? | google-site-kit-wp | js |
@@ -1,3 +1,4 @@
+/* eslint-disable sitekit/camelcase-acronyms */
/**
* Profile Select component tests.
* | 1 | /**
* Profile Select component tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import apiFetchMock from '@wordpress/api-fetch';
/**
* Internal dependencies
*/
import ProfileSelect from './ProfileSelect';
import { STORE_NAME, PROFILE_CREATE } from '../../datastore/constants';
import * as fixtures from '../../datastore/__fixtures__';
import { fireEvent, render, act } from '../../../../../../tests/js/test-utils';
// Mock apiFetch so we know if it's called.
jest.mock( '@wordpress/api-fetch' );
apiFetchMock.mockImplementation( ( ...args ) => {
// eslint-disable-next-line no-console
console.warn( 'apiFetch', ...args );
} );
const setupRegistry = ( { dispatch } ) => {
const { id, webPropertyId, accountId } = fixtures.propertiesProfiles.profiles[ 0 ];
dispatch( STORE_NAME ).setAccountID( accountId );
dispatch( STORE_NAME ).setPropertyID( webPropertyId );
dispatch( STORE_NAME ).setProfileID( id );
dispatch( STORE_NAME ).receiveGetProfiles( fixtures.propertiesProfiles.profiles, { accountID: accountId, propertyID: webPropertyId } );
};
const setupRegistryWithExistingTag = ( { dispatch } ) => {
const existingTag = {
accountID: fixtures.accountsPropertiesProfiles.profiles[ 0 ].accountId,
propertyID: fixtures.accountsPropertiesProfiles.profiles[ 0 ].webPropertyId,
};
const { id } = fixtures.propertiesProfiles.profiles[ 0 ];
dispatch( STORE_NAME ).setAccountID( existingTag.accountID );
dispatch( STORE_NAME ).setPropertyID( existingTag.propertyID );
dispatch( STORE_NAME ).setProfileID( id );
dispatch( STORE_NAME ).receiveGetProfiles( fixtures.accountsPropertiesProfiles.profiles, { accountID: existingTag.accountID, propertyID: existingTag.propertyID } );
dispatch( STORE_NAME ).receiveGetExistingTag( existingTag.propertyID );
};
const setupEmptyRegistry = ( { dispatch } ) => {
const accountID = fixtures.accountsPropertiesProfiles.profiles[ 0 ].accountId;
const propertyID = fixtures.accountsPropertiesProfiles.profiles[ 0 ].webPropertyId;
dispatch( STORE_NAME ).setSettings( {} );
dispatch( STORE_NAME ).receiveGetProfiles( [], { accountID, propertyID } );
};
describe( 'ProfileSelect', () => {
afterEach( () => apiFetchMock.mockClear() );
afterAll( () => jest.restoreAllMocks() );
it( 'should render an option for each analytics profile of the currently selected account and property.', async () => {
const { getAllByRole } = render( <ProfileSelect />, { setupRegistry } );
const listItems = getAllByRole( 'menuitem', { hidden: true } );
// Note: we do length + 1 here because there should also be an item for
// "Set up a new property".
expect( listItems ).toHaveLength( fixtures.accountsPropertiesProfiles.profiles.length + 1 );
} );
it( 'should display profile options of an existing account when present, and not be disabled.', async () => {
const { container, getAllByRole, registry } = render( <ProfileSelect />, { setupRegistry: setupRegistryWithExistingTag } );
const currentPropertyID = registry.select( STORE_NAME ).getPropertyID();
const existingTagPropertyID = registry.select( STORE_NAME ).getExistingTag();
expect( existingTagPropertyID ).toEqual( currentPropertyID );
const existingTagProfiles = fixtures.accountsPropertiesProfiles.profiles
.filter( ( { webPropertyId } ) => webPropertyId === existingTagPropertyID );
const listItems = getAllByRole( 'menuitem', { hidden: true } );
expect( listItems ).toHaveLength( existingTagProfiles.length + 1 );
const selectedText = container.querySelector( '.mdc-select__selected-text' );
expect( selectedText ).toHaveAttribute( 'aria-disabled', 'false' );
expect( container.querySelector( '.googlesitekit-analytics__select-profile' ) )
.not.toHaveClass( 'mdc-select--disabled' );
expect( apiFetchMock ).not.toHaveBeenCalled();
} );
it( 'should be disabled in the absence of a valid account or property ID.', async () => {
const { container, registry } = render( <ProfileSelect />, { setupRegistry } );
const validAccountID = registry.select( STORE_NAME ).getAccountID();
// A valid accountID is provided, so ensure it is not currently disabled.
expect( container.querySelector( '.googlesitekit-analytics__select-profile' ) )
.not.toHaveClass( 'mdc-select--disabled' );
await act( () => registry.dispatch( STORE_NAME ).setAccountID( '0' ) );
// An accountID of '0' is invalid, so ensure the select IS currently disabled.
expect( container.querySelector( '.googlesitekit-analytics__select-profile' ) )
.toHaveClass( 'mdc-select--disabled' );
await act( () => registry.dispatch( STORE_NAME ).setAccountID( validAccountID ) );
await act( () => registry.dispatch( STORE_NAME ).setPropertyID( '0' ) );
// The accountID is valid, but a propertyID of '0' is invalid, so ensure the select IS currently disabled.
expect( container.querySelector( '.googlesitekit-analytics__select-profile' ) )
.toHaveClass( 'mdc-select--disabled' );
} );
it( 'should render a select box with only an option to create a new view if no profiles are available.', async () => {
const { getAllByRole } = render( <ProfileSelect />, { setupRegistry: setupEmptyRegistry } );
const listItems = getAllByRole( 'menuitem', { hidden: true } );
expect( listItems ).toHaveLength( 1 );
expect( listItems[ 0 ].textContent ).toMatch( /set up a new view/i );
} );
it( 'should update profileID in the store when a new item is selected', async () => {
const { getByText, container, registry } = render( <ProfileSelect />, { setupRegistry } );
const originalProfileID = registry.select( STORE_NAME ).getProfileID();
// Click the label to expose the elements in the menu.
fireEvent.click( container.querySelector( '.mdc-floating-label' ) );
// Click this element to select it and fire the onChange event.
fireEvent.click( getByText( /set up a new view/i ) );
const newProfileID = registry.select( STORE_NAME ).getProfileID();
expect( originalProfileID ).not.toEqual( newProfileID );
expect( newProfileID ).toEqual( PROFILE_CREATE );
} );
} );
| 1 | 30,935 | I don't think we should have file-wide exceptions for this rule, let's annotate the individual instances. | google-site-kit-wp | js |
@@ -101,6 +101,8 @@ public class MailServiceBean implements java.io.Serializable {
private Session session;
public boolean sendSystemEmail(String to, String subject, String messageText) {
+ if (true) return true;
+
boolean sent = false;
String body = messageText + ResourceBundle.getBundle("Bundle").getString("notification.email.closing");
logger.fine("Sending email to " + to + ". Subject: <<<" + subject + ">>>. Body: " + body); | 1 | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package edu.harvard.iq.dataverse;
import com.sun.mail.smtp.SMTPSendFailedException;
import edu.harvard.iq.dataverse.authorization.groups.Group;
import edu.harvard.iq.dataverse.authorization.groups.GroupServiceBean;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.confirmemail.ConfirmEmailServiceBean;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean.Key;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.util.MailUtil;
import edu.harvard.iq.dataverse.util.SystemConfig;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.Properties;
import java.util.Map;
import java.util.HashMap;
import java.util.List;
import java.util.ResourceBundle;
import java.util.Set;
import java.util.logging.Logger;
import javax.annotation.Resource;
import javax.ejb.EJB;
import javax.ejb.Stateless;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.Session;
import javax.mail.Transport;
import javax.mail.internet.AddressException;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeMessage;
import org.apache.commons.lang.StringUtils;
/**
*
* original author: roberttreacy
*/
@Stateless
public class MailServiceBean implements java.io.Serializable {
@EJB
UserNotificationServiceBean userNotificationService;
@EJB
DataverseServiceBean dataverseService;
@EJB
DataFileServiceBean dataFileService;
@EJB
DatasetServiceBean datasetService;
@EJB
DatasetVersionServiceBean versionService;
@EJB
SystemConfig systemConfig;
@EJB
SettingsServiceBean settingsService;
@EJB
PermissionServiceBean permissionService;
@EJB
GroupServiceBean groupService;
@EJB
ConfirmEmailServiceBean confirmEmailService;
private static final Logger logger = Logger.getLogger(MailServiceBean.class.getCanonicalName());
private static final String EMAIL_PATTERN = "^[_A-Za-z0-9-\\+]+(\\.[_A-Za-z0-9-]+)*@"
+ "[A-Za-z0-9-]+(\\.[A-Za-z0-9]+)*(\\.[A-Za-z]{2,})$";
/**
* Creates a new instance of MailServiceBean
*/
public MailServiceBean() {
}
public void sendMail(String host, String from, String to, String subject, String messageText) {
Properties props = System.getProperties();
props.put("mail.smtp.host", host);
Session session = Session.getDefaultInstance(props, null);
try {
Message msg = new MimeMessage(session);
msg.setFrom(new InternetAddress(from));
msg.setRecipients(Message.RecipientType.TO,
InternetAddress.parse(to, false));
msg.setSubject(subject);
msg.setText(messageText);
Transport.send(msg);
} catch (AddressException ae) {
ae.printStackTrace(System.out);
} catch (MessagingException me) {
me.printStackTrace(System.out);
}
}
@Resource(name = "mail/notifyMailSession")
private Session session;
public boolean sendSystemEmail(String to, String subject, String messageText) {
boolean sent = false;
String body = messageText + ResourceBundle.getBundle("Bundle").getString("notification.email.closing");
logger.fine("Sending email to " + to + ". Subject: <<<" + subject + ">>>. Body: " + body);
try {
Message msg = new MimeMessage(session);
InternetAddress systemAddress = getSystemAddress();
if (systemAddress != null) {
msg.setFrom(systemAddress);
msg.setSentDate(new Date());
msg.setRecipients(Message.RecipientType.TO,
InternetAddress.parse(to, false));
msg.setSubject(subject);
msg.setText(body);
try {
Transport.send(msg);
sent = true;
} catch (SMTPSendFailedException ssfe) {
logger.warning("Failed to send mail to " + to + " (SMTPSendFailedException)");
}
} else {
logger.fine("Skipping sending mail to " + to + ", because the \"no-reply\" address not set (" + Key.SystemEmail + " setting).");
}
} catch (AddressException ae) {
logger.warning("Failed to send mail to " + to);
ae.printStackTrace(System.out);
} catch (MessagingException me) {
logger.warning("Failed to send mail to " + to);
me.printStackTrace(System.out);
}
return sent;
}
private InternetAddress getSystemAddress() {
String systemEmail = settingsService.getValueForKey(Key.SystemEmail);
return MailUtil.parseSystemAddress(systemEmail);
}
//@Resource(name="mail/notifyMailSession")
public void sendMail(String from, String to, String subject, String messageText) {
sendMail(from, to, subject, messageText, new HashMap());
}
public void sendMail(String from, String to, String subject, String messageText, Map extraHeaders) {
try {
Message msg = new MimeMessage(session);
if (from.matches(EMAIL_PATTERN)) {
msg.setFrom(new InternetAddress(from));
} else {
// set fake from address; instead, add it as part of the message
//msg.setFrom(new InternetAddress("[email protected]"));
msg.setFrom(getSystemAddress());
messageText = "From: " + from + "\n\n" + messageText;
}
msg.setSentDate(new Date());
msg.setRecipients(Message.RecipientType.TO,
InternetAddress.parse(to, false));
msg.setSubject(subject);
msg.setText(messageText);
if (extraHeaders != null) {
for (Object key : extraHeaders.keySet()) {
String headerName = key.toString();
String headerValue = extraHeaders.get(key).toString();
msg.addHeader(headerName, headerValue);
}
}
Transport.send(msg);
} catch (AddressException ae) {
ae.printStackTrace(System.out);
} catch (MessagingException me) {
me.printStackTrace(System.out);
}
}
public Boolean sendNotificationEmail(UserNotification notification){
boolean retval = false;
String emailAddress = getUserEmailAddress(notification);
if (emailAddress != null){
Object objectOfNotification = getObjectOfNotification(notification);
if (objectOfNotification != null){
String messageText = getMessageTextBasedOnNotification(notification, objectOfNotification);
String subjectText = getSubjectTextBasedOnNotification(notification);
if (!(messageText.isEmpty() || subjectText.isEmpty())){
retval = sendSystemEmail(emailAddress, subjectText, messageText);
} else {
logger.warning("Skipping " + notification.getType() + " notification, because couldn't get valid message");
}
} else {
logger.warning("Skipping " + notification.getType() + " notification, because no valid Object was found");
}
} else {
logger.warning("Skipping " + notification.getType() + " notification, because email address is null");
}
return retval;
}
private String getSubjectTextBasedOnNotification(UserNotification userNotification) {
switch (userNotification.getType()) {
case ASSIGNROLE:
return ResourceBundle.getBundle("Bundle").getString("notification.email.assign.role.subject");
case REVOKEROLE:
return ResourceBundle.getBundle("Bundle").getString("notification.email.revoke.role.subject");
case CREATEDV:
return ResourceBundle.getBundle("Bundle").getString("notification.email.create.dataverse.subject");
case REQUESTFILEACCESS:
return ResourceBundle.getBundle("Bundle").getString("notification.email.request.file.access.subject");
case GRANTFILEACCESS:
return ResourceBundle.getBundle("Bundle").getString("notification.email.grant.file.access.subject");
case REJECTFILEACCESS:
return ResourceBundle.getBundle("Bundle").getString("notification.email.rejected.file.access.subject");
case MAPLAYERUPDATED:
return ResourceBundle.getBundle("Bundle").getString("notification.email.update.maplayer");
case MAPLAYERDELETEFAILED:
return ResourceBundle.getBundle("Bundle").getString("notification.email.maplayer.deletefailed.subject");
case CREATEDS:
return ResourceBundle.getBundle("Bundle").getString("notification.email.create.dataset.subject");
case SUBMITTEDDS:
return ResourceBundle.getBundle("Bundle").getString("notification.email.submit.dataset.subject");
case PUBLISHEDDS:
return ResourceBundle.getBundle("Bundle").getString("notification.email.publish.dataset.subject");
case RETURNEDDS:
return ResourceBundle.getBundle("Bundle").getString("notification.email.returned.dataset.subject");
case CREATEACC:
return ResourceBundle.getBundle("Bundle").getString("notification.email.create.account.subject");
case CHECKSUMFAIL:
return ResourceBundle.getBundle("Bundle").getString("notification.email.checksumfail.subject");
case FILESYSTEMIMPORT:
return ResourceBundle.getBundle("Bundle").getString("notification.email.import.filesystem.subject");
case CHECKSUMIMPORT:
return ResourceBundle.getBundle("Bundle").getString("notification.email.import.checksum.subject");
}
return "";
}
private String getDatasetManageFileAccessLink(DataFile datafile){
return systemConfig.getDataverseSiteUrl() + "/permissions-manage-files.xhtml?id=" + datafile.getOwner().getId();
}
private String getDatasetLink(Dataset dataset){
return systemConfig.getDataverseSiteUrl() + "/dataset.xhtml?persistentId=" + dataset.getGlobalId();
}
private String getDatasetDraftLink(Dataset dataset){
return systemConfig.getDataverseSiteUrl() + "/dataset.xhtml?persistentId=" + dataset.getGlobalId() + "&version=DRAFT" + "&faces-redirect=true";
}
private String getDataverseLink(Dataverse dataverse){
return systemConfig.getDataverseSiteUrl() + "/dataverse/" + dataverse.getAlias();
}
/**
* Returns a '/'-separated string of roles that are effective for {@code au}
* over {@code dvObj}. Traverses the containment hierarchy of {@code dvObj}.
* Takes into consideration all groups that {@code au} is part of.
* @param au The authenticated user whose role assignments we look for.
* @param dvObj The Dataverse object over which the roles are assigned
* @return A '/'-separated string of the names of all roles effective for {@code au} over {@code dvObj}.
*/
private String getRoleStringFromUser(AuthenticatedUser au, DvObject dvObj) {
// Find user's role(s) for given dataverse/dataset
Set<RoleAssignment> roles = permissionService.assignmentsFor(au, dvObj);
List<String> roleNames = new ArrayList();
// Include roles derived from a user's groups
Set<Group> groupsUserBelongsTo = groupService.groupsFor(au, dvObj);
for (Group g : groupsUserBelongsTo) {
roles.addAll(permissionService.assignmentsFor(g, dvObj));
}
for (RoleAssignment ra : roles) {
roleNames.add(ra.getRole().getName());
}
return StringUtils.join(roleNames, "/");
}
/**
* Returns the URL to a given {@code DvObject} {@code d}. If {@code d} is a
* {@code DataFile}, returns a link to its {@code Dataset}.
* @param d The Dataverse object to get a link for.
* @return A string with a URL to the given Dataverse object.
*/
private String getDvObjectLink(DvObject d) {
if (d instanceof Dataverse) {
return getDataverseLink((Dataverse) d);
} else if (d instanceof Dataset) {
return getDatasetLink((Dataset) d);
} else if (d instanceof DataFile) {
return getDatasetLink(((DataFile) d).getOwner());
}
return "";
}
/**
* Returns string representation of the type of {@code DvObject} {@code d}.
* @param d The Dataverse object to get the string for
* @return A string that represents the type of a given Dataverse object.
*/
private String getDvObjectTypeString(DvObject d) {
if (d instanceof Dataverse) {
return "dataverse";
} else if (d instanceof Dataset) {
return "dataset";
} else if (d instanceof DataFile) {
return "data file";
}
return "";
}
private String getMessageTextBasedOnNotification(UserNotification userNotification, Object targetObject){
String messageText = ResourceBundle.getBundle("Bundle").getString("notification.email.greeting");
DatasetVersion version = null;
Dataset dataset = null;
DvObject dvObj = null;
String dvObjURL = null;
String dvObjTypeStr = null;
String pattern ="";
switch (userNotification.getType()) {
case ASSIGNROLE:
AuthenticatedUser au = userNotification.getUser();
dvObj = (DvObject) targetObject;
String joinedRoleNames = getRoleStringFromUser(au, dvObj);
dvObjURL = getDvObjectLink(dvObj);
dvObjTypeStr = getDvObjectTypeString(dvObj);
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.assignRole");
String[] paramArrayAssignRole = {joinedRoleNames, dvObjTypeStr, dvObj.getDisplayName(), dvObjURL};
messageText += MessageFormat.format(pattern, paramArrayAssignRole);
if (joinedRoleNames.contains("File Downloader")){
if (dvObjTypeStr.equals("dataset")){
pattern = ResourceBundle.getBundle("Bundle").getString("notification.access.granted.fileDownloader.additionalDataset");
String[] paramArrayAssignRoleDS = {" "};
messageText += MessageFormat.format(pattern, paramArrayAssignRoleDS);
}
if (dvObjTypeStr.equals("dataverse")){
pattern = ResourceBundle.getBundle("Bundle").getString("notification.access.granted.fileDownloader.additionalDataverse");
String[] paramArrayAssignRoleDV = {" "};
messageText += MessageFormat.format(pattern, paramArrayAssignRoleDV);
}
}
return messageText;
case REVOKEROLE:
dvObj = (DvObject) targetObject;
dvObjURL = getDvObjectLink(dvObj);
dvObjTypeStr = getDvObjectTypeString(dvObj);
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.revokeRole");
String[] paramArrayRevokeRole = {dvObjTypeStr, dvObj.getDisplayName(), dvObjURL};
messageText += MessageFormat.format(pattern, paramArrayRevokeRole);
return messageText;
case CREATEDV:
Dataverse dataverse = (Dataverse) targetObject;
Dataverse parentDataverse = dataverse.getOwner();
// initialize to empty string in the rare case that there is no parent dataverse (i.e. root dataverse just created)
String parentDataverseDisplayName = "";
String parentDataverseUrl = "";
if (parentDataverse != null) {
parentDataverseDisplayName = parentDataverse.getDisplayName();
parentDataverseUrl = getDataverseLink(parentDataverse);
}
String dataverseCreatedMessage = BundleUtil.getStringFromBundle("notification.email.createDataverse", Arrays.asList(
dataverse.getDisplayName(),
getDataverseLink(dataverse),
parentDataverseDisplayName,
parentDataverseUrl,
systemConfig.getGuidesBaseUrl(),
systemConfig.getGuidesVersion()));
logger.fine(dataverseCreatedMessage);
return messageText += dataverseCreatedMessage;
case REQUESTFILEACCESS:
DataFile datafile = (DataFile) targetObject;
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.requestFileAccess");
String[] paramArrayRequestFileAccess = {datafile.getOwner().getDisplayName(), getDatasetManageFileAccessLink(datafile)};
messageText += MessageFormat.format(pattern, paramArrayRequestFileAccess);
return messageText;
case GRANTFILEACCESS:
dataset = (Dataset) targetObject;
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.grantFileAccess");
String[] paramArrayGrantFileAccess = {dataset.getDisplayName(), getDatasetLink(dataset)};
messageText += MessageFormat.format(pattern, paramArrayGrantFileAccess);
return messageText;
case REJECTFILEACCESS:
dataset = (Dataset) targetObject;
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.rejectFileAccess");
String[] paramArrayRejectFileAccess = {dataset.getDisplayName(), getDatasetLink(dataset)};
messageText += MessageFormat.format(pattern, paramArrayRejectFileAccess);
return messageText;
case CREATEDS:
version = (DatasetVersion) targetObject;
String datasetCreatedMessage = BundleUtil.getStringFromBundle("notification.email.createDataset", Arrays.asList(
version.getDataset().getDisplayName(),
getDatasetLink(version.getDataset()),
version.getDataset().getOwner().getDisplayName(),
getDataverseLink(version.getDataset().getOwner()),
systemConfig.getGuidesBaseUrl(),
systemConfig.getGuidesVersion()
));
logger.fine(datasetCreatedMessage);
return messageText += datasetCreatedMessage;
case MAPLAYERUPDATED:
version = (DatasetVersion) targetObject;
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.worldMap.added");
String[] paramArrayMapLayer = {version.getDataset().getDisplayName(), getDatasetLink(version.getDataset())};
messageText += MessageFormat.format(pattern, paramArrayMapLayer);
return messageText;
case MAPLAYERDELETEFAILED:
FileMetadata targetFileMetadata = (FileMetadata) targetObject;
version = targetFileMetadata.getDatasetVersion();
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.maplayer.deletefailed.text");
String[] paramArrayMapLayerDelete = {targetFileMetadata.getLabel(), getDatasetLink(version.getDataset())};
messageText += MessageFormat.format(pattern, paramArrayMapLayerDelete);
return messageText;
case SUBMITTEDDS:
version = (DatasetVersion) targetObject;
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.wasSubmittedForReview");
String[] paramArraySubmittedDataset = {version.getDataset().getDisplayName(), getDatasetDraftLink(version.getDataset()),
version.getDataset().getOwner().getDisplayName(), getDataverseLink(version.getDataset().getOwner())};
messageText += MessageFormat.format(pattern, paramArraySubmittedDataset);
return messageText;
case PUBLISHEDDS:
version = (DatasetVersion) targetObject;
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.wasPublished");
String[] paramArrayPublishedDataset = {version.getDataset().getDisplayName(), getDatasetLink(version.getDataset()),
version.getDataset().getOwner().getDisplayName(), getDataverseLink(version.getDataset().getOwner())};
messageText += MessageFormat.format(pattern, paramArrayPublishedDataset);
return messageText;
case RETURNEDDS:
version = (DatasetVersion) targetObject;
pattern = ResourceBundle.getBundle("Bundle").getString("notification.email.wasReturnedByReviewer");
String[] paramArrayReturnedDataset = {version.getDataset().getDisplayName(), getDatasetDraftLink(version.getDataset()),
version.getDataset().getOwner().getDisplayName(), getDataverseLink(version.getDataset().getOwner())};
messageText += MessageFormat.format(pattern, paramArrayReturnedDataset);
return messageText;
case CREATEACC:
String accountCreatedMessage = BundleUtil.getStringFromBundle("notification.email.welcome", Arrays.asList(
systemConfig.getGuidesBaseUrl(),
systemConfig.getGuidesVersion()
));
String optionalConfirmEmailAddon = confirmEmailService.optionalConfirmEmailAddonMsg(userNotification.getUser());
accountCreatedMessage += optionalConfirmEmailAddon;
logger.fine("accountCreatedMessage: " + accountCreatedMessage);
return messageText += accountCreatedMessage;
case CHECKSUMFAIL:
version = (DatasetVersion) targetObject;
String checksumFailMsg = BundleUtil.getStringFromBundle("notification.checksumfail", Arrays.asList(
version.getDataset().getGlobalId()
));
logger.info("checksumFailMsg: " + checksumFailMsg);
return messageText += checksumFailMsg;
case FILESYSTEMIMPORT:
version = (DatasetVersion) targetObject;
String fileImportMsg = BundleUtil.getStringFromBundle("notification.import.filesystem", Arrays.asList(
systemConfig.getDataverseSiteUrl(),
version.getDataset().getGlobalId(),
version.getDataset().getDisplayName()
));
logger.info("fileImportMsg: " + fileImportMsg);
return messageText += fileImportMsg;
case CHECKSUMIMPORT:
version = (DatasetVersion) targetObject;
String checksumImportMsg = BundleUtil.getStringFromBundle("notification.import.checksum", Arrays.asList(
version.getDataset().getGlobalId(),
version.getDataset().getDisplayName()
));
logger.info("checksumImportMsg: " + checksumImportMsg);
return messageText += checksumImportMsg;
}
return "";
}
private Object getObjectOfNotification (UserNotification userNotification){
switch (userNotification.getType()) {
case ASSIGNROLE:
case REVOKEROLE:
// Can either be a dataverse or dataset, so search both
Dataverse dataverse = dataverseService.find(userNotification.getObjectId());
if (dataverse != null) return dataverse;
Dataset dataset = datasetService.find(userNotification.getObjectId());
return dataset;
case CREATEDV:
return dataverseService.find(userNotification.getObjectId());
case REQUESTFILEACCESS:
return dataFileService.find(userNotification.getObjectId());
case GRANTFILEACCESS:
case REJECTFILEACCESS:
return datasetService.find(userNotification.getObjectId());
case MAPLAYERDELETEFAILED:
return dataFileService.findFileMetadata(userNotification.getObjectId());
case MAPLAYERUPDATED:
case CREATEDS:
case SUBMITTEDDS:
case PUBLISHEDDS:
case RETURNEDDS:
return versionService.find(userNotification.getObjectId());
case CREATEACC:
return userNotification.getUser();
case CHECKSUMFAIL:
return datasetService.find(userNotification.getObjectId());
case FILESYSTEMIMPORT:
return versionService.find(userNotification.getObjectId());
case CHECKSUMIMPORT:
return versionService.find(userNotification.getObjectId());
}
return null;
}
private String getUserEmailAddress(UserNotification notification) {
if (notification != null) {
if (notification.getUser() != null) {
if (notification.getUser().getDisplayInfo() != null) {
if (notification.getUser().getDisplayInfo().getEmailAddress() != null) {
logger.fine("Email address: "+notification.getUser().getDisplayInfo().getEmailAddress());
return notification.getUser().getDisplayInfo().getEmailAddress();
}
}
}
}
logger.fine("no email address");
return null;
}
}
| 1 | 36,091 | @raprasad you plan to take this "if true" out, right? | IQSS-dataverse | java |
@@ -496,11 +496,13 @@ class ConsoleMaster(flow.FlowMaster):
self.eventlog = not self.eventlog
self.view_flowlist()
- def _readflow(self, path):
- path = os.path.expanduser(path)
+ def _readflow(self, paths):
try:
- f = file(path, "rb")
- flows = list(flow.FlowReader(f).stream())
+ flows = []
+ for path in paths:
+ path = os.path.expanduser(path)
+ f = file(path, "rb")
+ flows.extend(list(flow.FlowReader(f).stream()))
except (IOError, flow.FlowReadError), v:
return True, v.strerror
return False, flows | 1 | from __future__ import absolute_import
import mailcap, mimetypes, tempfile, os, subprocess, glob, time, shlex, stat
import os.path, sys, weakref, traceback
import urwid
from .. import controller, utils, flow, script, proxy
from . import flowlist, flowview, help, common, grideditor, palettes, contentview, flowdetailview
EVENTLOG_SIZE = 500
class Stop(Exception): pass
class _PathCompleter:
def __init__(self, _testing=False):
"""
_testing: disables reloading of the lookup table to make testing possible.
"""
self.lookup, self.offset = None, None
self.final = None
self._testing = _testing
def reset(self):
self.lookup = None
self.offset = -1
def complete(self, txt):
"""
Returns the next completion for txt, or None if there is no completion.
"""
path = os.path.expanduser(txt)
if not self.lookup:
if not self._testing:
# Lookup is a set of (display value, actual value) tuples.
self.lookup = []
if os.path.isdir(path):
files = glob.glob(os.path.join(path, "*"))
prefix = txt
else:
files = glob.glob(path+"*")
prefix = os.path.dirname(txt)
prefix = prefix or "./"
for f in files:
display = os.path.join(prefix, os.path.basename(f))
if os.path.isdir(f):
display += "/"
self.lookup.append((display, f))
if not self.lookup:
self.final = path
return path
self.lookup.sort()
self.offset = -1
self.lookup.append((txt, txt))
self.offset += 1
if self.offset >= len(self.lookup):
self.offset = 0
ret = self.lookup[self.offset]
self.final = ret[1]
return ret[0]
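# Illustrative behavior (directory contents here are hypothetical): for a
# directory holding a.py and ab.py, successive calls to complete("a") cycle
# through the sorted candidates plus the original text, e.g. "./a.py", then
# "./ab.py", then "a" again, since (txt, txt) is appended to the lookup and
# the offset wraps around.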
#begin nocover
class PathEdit(urwid.Edit, _PathCompleter):
def __init__(self, *args, **kwargs):
urwid.Edit.__init__(self, *args, **kwargs)
_PathCompleter.__init__(self)
def keypress(self, size, key):
if key == "tab":
comp = self.complete(self.get_edit_text())
self.set_edit_text(comp)
self.set_edit_pos(len(comp))
else:
self.reset()
return urwid.Edit.keypress(self, size, key)
class ActionBar(common.WWrap):
def __init__(self):
self.message("")
def selectable(self):
return True
def path_prompt(self, prompt, text):
self.expire = None
self.w = PathEdit(prompt, text)
def prompt(self, prompt, text = ""):
self.expire = None
# A (partial) workaround for this Urwid issue:
# https://github.com/Nic0/tyrs/issues/115
# We can remove it once everyone is beyond 1.0.1
if isinstance(prompt, basestring):
prompt = unicode(prompt)
self.w = urwid.Edit(prompt, text or "")
def message(self, message, expire=None):
self.expire = expire
self.w = urwid.Text(message)
class StatusBar(common.WWrap):
def __init__(self, master, helptext):
self.master, self.helptext = master, helptext
self.ab = ActionBar()
self.ib = common.WWrap(urwid.Text(""))
self.w = urwid.Pile([self.ib, self.ab])
def get_status(self):
r = []
if self.master.setheaders.count():
r.append("[")
r.append(("heading_key", "H"))
r.append("eaders]")
if self.master.replacehooks.count():
r.append("[")
r.append(("heading_key", "R"))
r.append("eplacing]")
if self.master.client_playback:
r.append("[")
r.append(("heading_key", "cplayback"))
r.append(":%s to go]"%self.master.client_playback.count())
if self.master.server_playback:
r.append("[")
r.append(("heading_key", "splayback"))
if self.master.nopop:
r.append(":%s in file]"%self.master.server_playback.count())
else:
r.append(":%s to go]"%self.master.server_playback.count())
if self.master.get_ignore_filter():
r.append("[")
r.append(("heading_key", "I"))
r.append("gnore:%d]" % len(self.master.get_ignore_filter()))
if self.master.get_tcp_filter():
r.append("[")
r.append(("heading_key", "T"))
r.append("CP:%d]" % len(self.master.get_tcp_filter()))
if self.master.state.intercept_txt:
r.append("[")
r.append(("heading_key", "i"))
r.append(":%s]"%self.master.state.intercept_txt)
if self.master.state.limit_txt:
r.append("[")
r.append(("heading_key", "l"))
r.append(":%s]"%self.master.state.limit_txt)
if self.master.stickycookie_txt:
r.append("[")
r.append(("heading_key", "t"))
r.append(":%s]"%self.master.stickycookie_txt)
if self.master.stickyauth_txt:
r.append("[")
r.append(("heading_key", "u"))
r.append(":%s]"%self.master.stickyauth_txt)
if self.master.state.default_body_view.name != "Auto":
r.append("[")
r.append(("heading_key", "M"))
r.append(":%s]"%self.master.state.default_body_view.name)
opts = []
if self.master.anticache:
opts.append("anticache")
if self.master.anticomp:
opts.append("anticomp")
if self.master.showhost:
opts.append("showhost")
if not self.master.refresh_server_playback:
opts.append("norefresh")
if self.master.killextra:
opts.append("killextra")
if self.master.server.config.no_upstream_cert:
opts.append("no-upstream-cert")
if self.master.state.follow_focus:
opts.append("following")
if self.master.stream_large_bodies:
opts.append("stream:%s" % utils.pretty_size(self.master.stream_large_bodies.max_size))
if opts:
r.append("[%s]"%(":".join(opts)))
if self.master.server.config.mode in ["reverse", "upstream"]:
dst = self.master.server.config.mode.dst
scheme = "https" if dst[0] else "http"
if dst[1] != dst[0]:
scheme += "2https" if dst[1] else "http"
r.append("[dest:%s]"%utils.unparse_url(scheme, *dst[2:]))
if self.master.scripts:
r.append("[")
r.append(("heading_key", "s"))
r.append("cripts:%s]"%len(self.master.scripts))
# r.append("[lt:%0.3f]"%self.master.looptime)
if self.master.stream:
r.append("[W:%s]"%self.master.stream_path)
return r
def redraw(self):
if self.ab.expire and time.time() > self.ab.expire:
self.message("")
fc = self.master.state.flow_count()
if self.master.state.focus is None:
offset = 0
else:
offset = min(self.master.state.focus + 1, fc)
t = [
('heading', ("[%s/%s]"%(offset, fc)).ljust(9))
]
if self.master.server.bound:
host = self.master.server.address.host
if host == "0.0.0.0":
host = "*"
boundaddr = "[%s:%s]"%(host, self.master.server.address.port)
else:
boundaddr = ""
t.extend(self.get_status())
status = urwid.AttrWrap(urwid.Columns([
urwid.Text(t),
urwid.Text(
[
self.helptext,
boundaddr
],
align="right"
),
]), "heading")
self.ib.set_w(status)
def update(self, text):
self.helptext = text
self.redraw()
self.master.drawscreen()
def selectable(self):
return True
def get_edit_text(self):
return self.ab.w.get_edit_text()
def path_prompt(self, prompt, text):
return self.ab.path_prompt(prompt, text)
def prompt(self, prompt, text = ""):
self.ab.prompt(prompt, text)
def message(self, msg, expire=None):
if expire:
expire = time.time() + float(expire)/1000
self.ab.message(msg, expire)
self.master.drawscreen()
#end nocover
class ConsoleState(flow.State):
def __init__(self):
flow.State.__init__(self)
self.focus = None
self.follow_focus = None
self.default_body_view = contentview.get("Auto")
self.view_mode = common.VIEW_LIST
self.view_flow_mode = common.VIEW_FLOW_REQUEST
self.last_script = ""
self.last_saveload = ""
self.flowsettings = weakref.WeakKeyDictionary()
def add_flow_setting(self, flow, key, value):
d = self.flowsettings.setdefault(flow, {})
d[key] = value
def get_flow_setting(self, flow, key, default=None):
d = self.flowsettings.get(flow, {})
return d.get(key, default)
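# Per-flow UI settings live in a WeakKeyDictionary keyed by the flow, so the
# entries disappear automatically once a flow is garbage collected, e.g.:
#
#   state.add_flow_setting(f, "fullcontents", True)
#   state.get_flow_setting(f, "fullcontents", default=False)  # True
#
# (the "fullcontents" key here is only an illustration)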
def add_flow(self, f):
super(ConsoleState, self).add_flow(f)
if self.focus is None:
self.set_focus(0)
elif self.follow_focus:
self.set_focus(len(self.view) - 1)
return f
def update_flow(self, f):
super(ConsoleState, self).update_flow(f)
if self.focus is None:
self.set_focus(0)
return f
def set_limit(self, limit):
ret = flow.State.set_limit(self, limit)
self.set_focus(self.focus)
return ret
def get_focus(self):
if not self.view or self.focus is None:
return None, None
return self.view[self.focus], self.focus
def set_focus(self, idx):
if self.view:
if idx >= len(self.view):
idx = len(self.view) - 1
elif idx < 0:
idx = 0
self.focus = idx
def set_focus_flow(self, f):
self.set_focus(self.view.index(f))
def get_from_pos(self, pos):
if len(self.view) <= pos or pos < 0:
return None, None
return self.view[pos], pos
def get_next(self, pos):
return self.get_from_pos(pos+1)
def get_prev(self, pos):
return self.get_from_pos(pos-1)
def delete_flow(self, f):
if f in self.view and self.view.index(f) <= self.focus:
self.focus -= 1
if self.focus < 0:
self.focus = None
ret = flow.State.delete_flow(self, f)
self.set_focus(self.focus)
return ret
def clear(self):
self.focus = None
super(ConsoleState, self).clear()
class Options(object):
attributes = [
"app",
"app_domain",
"app_ip",
"anticache",
"anticomp",
"client_replay",
"eventlog",
"keepserving",
"kill",
"intercept",
"no_server",
"refresh_server_playback",
"rfile",
"scripts",
"showhost",
"replacements",
"rheaders",
"setheaders",
"server_replay",
"stickycookie",
"stickyauth",
"stream_large_bodies",
"verbosity",
"wfile",
"nopop",
"palette",
]
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
for i in self.attributes:
if not hasattr(self, i):
setattr(self, i, None)
#begin nocover
class ConsoleMaster(flow.FlowMaster):
palette = []
def __init__(self, server, options):
flow.FlowMaster.__init__(self, server, ConsoleState())
self.looptime = 0
self.stream_path = None
self.options = options
for i in options.replacements:
self.replacehooks.add(*i)
for i in options.setheaders:
self.setheaders.add(*i)
self.flow_list_walker = None
self.set_palette(options.palette)
r = self.set_intercept(options.intercept)
if r:
print >> sys.stderr, "Intercept error:", r
sys.exit(1)
r = self.set_stickycookie(options.stickycookie)
if r:
print >> sys.stderr, "Sticky cookies error:", r
sys.exit(1)
r = self.set_stickyauth(options.stickyauth)
if r:
print >> sys.stderr, "Sticky auth error:", r
sys.exit(1)
self.set_stream_large_bodies(options.stream_large_bodies)
self.refresh_server_playback = options.refresh_server_playback
self.anticache = options.anticache
self.anticomp = options.anticomp
self.killextra = options.kill
self.rheaders = options.rheaders
self.nopop = options.nopop
self.showhost = options.showhost
self.eventlog = options.eventlog
self.eventlist = urwid.SimpleListWalker([])
if options.client_replay:
self.client_playback_path(options.client_replay)
if options.server_replay:
self.server_playback_path(options.server_replay)
if options.scripts:
for i in options.scripts:
err = self.load_script(i)
if err:
print >> sys.stderr, "Script load error:", err
sys.exit(1)
if options.outfile:
err = self.start_stream_to_path(options.outfile[0], options.outfile[1])
if err:
print >> sys.stderr, "Stream file error:", err
sys.exit(1)
if options.app:
self.start_app(self.options.app_host, self.options.app_port)
def start_stream_to_path(self, path, mode="wb"):
path = os.path.expanduser(path)
try:
f = file(path, mode)
self.start_stream(f, None)
except IOError, v:
return str(v)
self.stream_path = path
def _run_script_method(self, method, s, f):
status, val = s.run(method, f)
if val:
if status:
self.add_event("Method %s return: %s"%(method, val), "debug")
else:
self.add_event("Method %s error: %s"%(method, val[1]), "error")
def run_script_once(self, command, f):
if not command:
return
self.add_event("Running script on flow: %s"%command, "debug")
try:
s = script.Script(command, self)
except script.ScriptError, v:
self.statusbar.message("Error loading script.")
self.add_event("Error loading script:\n%s"%v.args[0], "error")
return
if f.request:
self._run_script_method("request", s, f)
if f.response:
self._run_script_method("response", s, f)
if f.error:
self._run_script_method("error", s, f)
s.unload()
self.refresh_flow(f)
self.state.last_script = command
def set_script(self, command):
if not command:
return
ret = self.load_script(command)
if ret:
self.statusbar.message(ret)
self.state.last_script = command
def toggle_eventlog(self):
self.eventlog = not self.eventlog
self.view_flowlist()
def _readflow(self, path):
path = os.path.expanduser(path)
try:
f = file(path, "rb")
flows = list(flow.FlowReader(f).stream())
except (IOError, flow.FlowReadError), v:
return True, v.strerror
return False, flows
def client_playback_path(self, path):
err, ret = self._readflow(path)
if err:
self.statusbar.message(ret)
else:
self.start_client_playback(ret, False)
def server_playback_path(self, path):
err, ret = self._readflow(path)
if err:
self.statusbar.message(ret)
else:
self.start_server_playback(
ret,
self.killextra, self.rheaders,
False, self.nopop,
self.options.replay_ignore_params, self.options.replay_ignore_content
)
def spawn_editor(self, data):
fd, name = tempfile.mkstemp('', "mproxy")
os.write(fd, data)
os.close(fd)
c = os.environ.get("EDITOR")
# if no EDITOR is set, assume 'vi'
if not c:
c = "vi"
cmd = shlex.split(c)
cmd.append(name)
self.ui.stop()
try:
subprocess.call(cmd)
except:
self.statusbar.message("Can't start editor: %s" % " ".join(c))
else:
data = open(name,"rb").read()
self.ui.start()
os.unlink(name)
return data
def spawn_external_viewer(self, data, contenttype):
if contenttype:
contenttype = contenttype.split(";")[0]
ext = mimetypes.guess_extension(contenttype) or ""
else:
ext = ""
fd, name = tempfile.mkstemp(ext, "mproxy")
os.write(fd, data)
os.close(fd)
# read-only to remind the user that this is a view function
os.chmod(name, stat.S_IREAD)
cmd = None
shell = False
if contenttype:
c = mailcap.getcaps()
cmd, _ = mailcap.findmatch(c, contenttype, filename=name)
if cmd:
shell = True
if not cmd:
# hm which one should get priority?
c = os.environ.get("PAGER") or os.environ.get("EDITOR")
if not c:
c = "less"
cmd = shlex.split(c)
cmd.append(name)
self.ui.stop()
try:
subprocess.call(cmd, shell=shell)
except:
self.statusbar.message("Can't start external viewer: %s" % " ".join(c))
self.ui.start()
os.unlink(name)
def set_palette(self, name):
self.palette = palettes.palettes[name]
def run(self):
self.ui = urwid.raw_display.Screen()
self.ui.set_terminal_properties(256)
self.ui.register_palette(self.palette)
self.flow_list_walker = flowlist.FlowListWalker(self, self.state)
self.view = None
self.statusbar = None
self.header = None
self.body = None
self.help_context = None
self.prompting = False
self.onekey = False
self.view_flowlist()
self.server.start_slave(
controller.Slave,
controller.Channel(self.masterq, self.should_exit)
)
if self.options.rfile:
ret = self.load_flows_path(self.options.rfile)
if ret and self.state.flow_count():
self.add_event(
"File truncated or corrupted. "
"Loaded as many flows as possible.",
"error"
)
elif ret and not self.state.flow_count():
self.shutdown()
print >> sys.stderr, "Could not load file:", ret
sys.exit(1)
try:
self.ui.run_wrapper(self.loop)
except Exception:
self.ui.stop()
sys.stdout.flush()
print >> sys.stderr, traceback.format_exc()
print >> sys.stderr, "mitmproxy has crashed!"
print >> sys.stderr, "Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy"
print >> sys.stderr, "Shutting down..."
sys.stderr.flush()
self.shutdown()
def make_view(self):
self.view = urwid.Frame(
self.body,
header = self.header,
footer = self.statusbar
)
self.view.set_focus("body")
def view_help(self):
h = help.HelpView(self, self.help_context, (self.statusbar, self.body, self.header))
self.statusbar = StatusBar(self, help.footer)
self.body = h
self.header = None
self.make_view()
def view_flowdetails(self, flow):
h = flowdetailview.FlowDetailsView(self, flow, (self.statusbar, self.body, self.header))
self.statusbar = StatusBar(self, flowdetailview.footer)
self.body = h
self.header = None
self.make_view()
def view_grideditor(self, ge):
self.body = ge
self.header = None
self.help_context = ge.make_help()
self.statusbar = StatusBar(self, grideditor.footer)
self.make_view()
def view_flowlist(self):
if self.ui.started:
self.ui.clear()
if self.state.follow_focus:
self.state.set_focus(self.state.flow_count())
if self.eventlog:
self.body = flowlist.BodyPile(self)
else:
self.body = flowlist.FlowListBox(self)
self.statusbar = StatusBar(self, flowlist.footer)
self.header = None
self.state.view_mode = common.VIEW_LIST
self.make_view()
self.help_context = flowlist.help_context
def view_flow(self, flow):
self.body = flowview.FlowView(self, self.state, flow)
self.header = flowview.FlowViewHeader(self, flow)
self.statusbar = StatusBar(self, flowview.footer)
self.state.set_focus_flow(flow)
self.state.view_mode = common.VIEW_FLOW
self.make_view()
self.help_context = flowview.help_context
def _write_flows(self, path, flows):
self.state.last_saveload = path
if not path:
return
path = os.path.expanduser(path)
try:
f = file(path, "wb")
fw = flow.FlowWriter(f)
for i in flows:
fw.add(i)
f.close()
except IOError, v:
self.statusbar.message(v.strerror)
def save_one_flow(self, path, flow):
return self._write_flows(path, [flow])
def save_flows(self, path):
return self._write_flows(path, self.state.view)
def load_flows_callback(self, path):
if not path:
return
ret = self.load_flows_path(path)
return ret or "Flows loaded from %s"%path
def load_flows_path(self, path):
self.state.last_saveload = path
reterr = None
try:
flow.FlowMaster.load_flows_file(self, path)
except flow.FlowReadError, v:
reterr = str(v)
if self.flow_list_walker:
self.sync_list_view()
return reterr
def path_prompt(self, prompt, text, callback, *args):
self.statusbar.path_prompt(prompt, text)
self.view.set_focus("footer")
self.prompting = (callback, args)
def prompt(self, prompt, text, callback, *args):
self.statusbar.prompt(prompt, text)
self.view.set_focus("footer")
self.prompting = (callback, args)
def prompt_edit(self, prompt, text, callback):
self.statusbar.prompt(prompt + ": ", text)
self.view.set_focus("footer")
self.prompting = (callback, [])
def prompt_onekey(self, prompt, keys, callback, *args):
"""
Keys are a set of (word, key) tuples. The appropriate key in the
word is highlighted.
"""
prompt = [prompt, " ("]
mkup = []
for i, e in enumerate(keys):
mkup.extend(common.highlight_key(e[0], e[1]))
if i < len(keys)-1:
mkup.append(",")
prompt.extend(mkup)
prompt.append(")? ")
self.onekey = "".join(i[1] for i in keys)
self.prompt(prompt, "", callback, *args)
def prompt_done(self):
self.prompting = False
self.onekey = False
self.view.set_focus("body")
self.statusbar.message("")
def prompt_execute(self, txt=None):
if not txt:
txt = self.statusbar.get_edit_text()
p, args = self.prompting
self.prompt_done()
msg = p(txt, *args)
if msg:
self.statusbar.message(msg, 1000)
def prompt_cancel(self):
self.prompt_done()
def accept_all(self):
self.state.accept_all(self)
def set_limit(self, txt):
v = self.state.set_limit(txt)
self.sync_list_view()
return v
def set_intercept(self, txt):
return self.state.set_intercept(txt)
def change_default_display_mode(self, t):
v = contentview.get_by_shortcut(t)
self.state.default_body_view = v
self.refresh_focus()
def drawscreen(self):
size = self.ui.get_cols_rows()
canvas = self.view.render(size, focus=1)
self.ui.draw_screen(size, canvas)
return size
def pop_view(self):
if self.state.view_mode == common.VIEW_FLOW:
self.view_flow(self.state.view[self.state.focus])
else:
self.view_flowlist()
def edit_scripts(self, scripts):
commands = [x[0] for x in scripts] # remove outer array
if commands == [s.command for s in self.scripts]:
return
self.unload_scripts()
for command in commands:
self.load_script(command)
def edit_ignore_filter(self, ignore):
patterns = (x[0] for x in ignore)
self.set_ignore_filter(patterns)
def edit_tcp_filter(self, tcp):
patterns = (x[0] for x in tcp)
self.set_tcp_filter(patterns)
def loop(self):
changed = True
try:
while not self.should_exit.is_set():
startloop = time.time()
if changed:
self.statusbar.redraw()
size = self.drawscreen()
changed = self.tick(self.masterq, 0.01)
self.ui.set_input_timeouts(max_wait=0.01)
keys = self.ui.get_input()
if keys:
changed = True
for k in keys:
if self.prompting:
if k == "esc":
self.prompt_cancel()
elif self.onekey:
if k == "enter":
self.prompt_cancel()
elif k in self.onekey:
self.prompt_execute(k)
elif k == "enter":
self.prompt_execute()
else:
self.view.keypress(size, k)
else:
k = self.view.keypress(size, k)
if k:
self.statusbar.message("")
if k == "?":
self.view_help()
elif k == "c":
if not self.client_playback:
self.path_prompt(
"Client replay: ",
self.state.last_saveload,
self.client_playback_path
)
else:
self.prompt_onekey(
"Stop current client replay?",
(
("yes", "y"),
("no", "n"),
),
self.stop_client_playback_prompt,
)
elif k == "H":
self.view_grideditor(
grideditor.SetHeadersEditor(
self,
self.setheaders.get_specs(),
self.setheaders.set
)
)
elif k == "I":
self.view_grideditor(
grideditor.HostPatternEditor(
self,
[[x] for x in self.get_ignore_filter()],
self.edit_ignore_filter
)
)
elif k == "T":
self.view_grideditor(
grideditor.HostPatternEditor(
self,
[[x] for x in self.get_tcp_filter()],
self.edit_tcp_filter
)
)
elif k == "i":
self.prompt(
"Intercept filter: ",
self.state.intercept_txt,
self.set_intercept
)
elif k == "Q":
raise Stop
elif k == "q":
self.prompt_onekey(
"Quit",
(
("yes", "y"),
("no", "n"),
),
self.quit,
)
elif k == "M":
self.prompt_onekey(
"Global default display mode",
contentview.view_prompts,
self.change_default_display_mode
)
elif k == "R":
self.view_grideditor(
grideditor.ReplaceEditor(
self,
self.replacehooks.get_specs(),
self.replacehooks.set
)
)
elif k == "s":
self.view_grideditor(
grideditor.ScriptEditor(
self,
[[i.command] for i in self.scripts],
self.edit_scripts
)
)
#if self.scripts:
# self.load_script(None)
#else:
# self.path_prompt(
# "Set script: ",
# self.state.last_script,
# self.set_script
# )
elif k == "S":
if not self.server_playback:
self.path_prompt(
"Server replay path: ",
self.state.last_saveload,
self.server_playback_path
)
else:
self.prompt_onekey(
"Stop current server replay?",
(
("yes", "y"),
("no", "n"),
),
self.stop_server_playback_prompt,
)
elif k == "o":
self.prompt_onekey(
"Options",
(
("anticache", "a"),
("anticomp", "c"),
("showhost", "h"),
("killextra", "k"),
("norefresh", "n"),
("no-upstream-certs", "u"),
),
self._change_options
)
elif k == "t":
self.prompt(
"Sticky cookie filter: ",
self.stickycookie_txt,
self.set_stickycookie
)
elif k == "u":
self.prompt(
"Sticky auth filter: ",
self.stickyauth_txt,
self.set_stickyauth
)
self.looptime = time.time() - startloop
except (Stop, KeyboardInterrupt):
pass
def stop_client_playback_prompt(self, a):
if a != "n":
self.stop_client_playback()
def stop_server_playback_prompt(self, a):
if a != "n":
self.stop_server_playback()
def quit(self, a):
if a != "n":
raise Stop
def _change_options(self, a):
if a == "a":
self.anticache = not self.anticache
if a == "c":
self.anticomp = not self.anticomp
if a == "h":
self.showhost = not self.showhost
self.sync_list_view()
self.refresh_focus()
elif a == "k":
self.killextra = not self.killextra
elif a == "n":
self.refresh_server_playback = not self.refresh_server_playback
elif a == "u":
self.server.config.no_upstream_cert = not self.server.config.no_upstream_cert
def shutdown(self):
self.state.killall(self)
flow.FlowMaster.shutdown(self)
def sync_list_view(self):
self.flow_list_walker._modified()
def clear_flows(self):
self.state.clear()
self.sync_list_view()
def toggle_follow_flows(self):
# toggle flow follow
self.state.follow_focus = not self.state.follow_focus
# jump to most recent flow if follow is now on
if self.state.follow_focus:
self.state.set_focus(self.state.flow_count())
self.sync_list_view()
def delete_flow(self, f):
self.state.delete_flow(f)
self.sync_list_view()
def refresh_focus(self):
if self.state.view:
self.refresh_flow(self.state.view[self.state.focus])
def refresh_flow(self, c):
if hasattr(self.header, "refresh_flow"):
self.header.refresh_flow(c)
if hasattr(self.body, "refresh_flow"):
self.body.refresh_flow(c)
if hasattr(self.statusbar, "refresh_flow"):
self.statusbar.refresh_flow(c)
def process_flow(self, f):
if self.state.intercept and f.match(self.state.intercept) and not f.request.is_replay:
f.intercept(self)
else:
f.reply()
self.sync_list_view()
self.refresh_flow(f)
def clear_events(self):
self.eventlist[:] = []
def add_event(self, e, level="info"):
needed = dict(error=0, info=1, debug=2).get(level, 1)
if self.options.verbosity < needed:
return
if level == "error":
e = urwid.Text(("error", str(e)))
else:
e = urwid.Text(str(e))
self.eventlist.append(e)
if len(self.eventlist) > EVENTLOG_SIZE:
self.eventlist.pop(0)
self.eventlist.set_focus(len(self.eventlist)-1)
# Handlers
def handle_error(self, f):
f = flow.FlowMaster.handle_error(self, f)
if f:
self.process_flow(f)
return f
def handle_request(self, f):
f = flow.FlowMaster.handle_request(self, f)
if f:
self.process_flow(f)
return f
def handle_response(self, f):
f = flow.FlowMaster.handle_response(self, f)
if f:
self.process_flow(f)
return f
| 1 | 10,617 | Could you adjust this to `with open(path, "rb"):` here and below? We should make sure that we close all files. | mitmproxy-mitmproxy | py |
@@ -7,6 +7,8 @@ package libkbfs
import (
"testing"
+ "golang.org/x/net/context"
+
"github.com/keybase/kbfs/kbfshash"
"github.com/keybase/kbfs/tlf"
) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"testing"
"github.com/keybase/kbfs/kbfshash"
"github.com/keybase/kbfs/tlf"
)
func blockCacheTestInit(t *testing.T, capacity int,
bytesCapacity uint64) Config {
b := NewBlockCacheStandard(capacity, bytesCapacity)
config := MakeTestConfigOrBust(t, "test")
config.SetBlockCache(b)
return config
}
func testBcachePutWithBlock(t *testing.T, id BlockID, bcache BlockCache, lifetime BlockCacheLifetime, block Block) {
ptr := BlockPointer{ID: id}
tlf := tlf.FakeID(1, false)
// put the block
if err := bcache.Put(ptr, tlf, block, lifetime); err != nil {
t.Errorf("Got error on Put for block %s: %v", id, err)
}
// make sure we can get it successfully
if block2, err := bcache.Get(ptr); err != nil {
t.Errorf("Got error on get for block %s: %v", id, err)
} else if block2 != block {
t.Errorf("Got %v, expected %v", block2, block)
}
}
func testBcachePut(t *testing.T, id BlockID, bcache BlockCache, lifetime BlockCacheLifetime) {
block := NewFileBlock()
testBcachePutWithBlock(t, id, bcache, lifetime, block)
}
func testExpectedMissing(t *testing.T, id BlockID, bcache BlockCache) {
expectedErr := NoSuchBlockError{id}
ptr := BlockPointer{ID: id}
if _, err := bcache.Get(ptr); err == nil {
t.Errorf("No expected error on 1st get: %v", err)
} else if err != expectedErr {
t.Errorf("Got unexpected error on 1st get: %v", err)
}
}
func TestBcachePut(t *testing.T) {
config := blockCacheTestInit(t, 100, 1<<30)
defer CheckConfigAndShutdown(t, config)
testBcachePut(t, fakeBlockID(1), config.BlockCache(), TransientEntry)
testBcachePut(t, fakeBlockID(2), config.BlockCache(), PermanentEntry)
}
func TestBcachePutPastCapacity(t *testing.T) {
config := blockCacheTestInit(t, 2, 1<<30)
defer CheckConfigAndShutdown(t, config)
bcache := config.BlockCache()
id1 := fakeBlockID(1)
testBcachePut(t, id1, bcache, TransientEntry)
id2 := fakeBlockID(2)
testBcachePut(t, id2, bcache, TransientEntry)
testBcachePut(t, fakeBlockID(3), bcache, TransientEntry)
// now block 1 should have been kicked out
testExpectedMissing(t, id1, bcache)
// but 2 should still be there
if _, err := bcache.Get(BlockPointer{ID: id2}); err != nil {
t.Errorf("Got unexpected error on 2nd get: %v", err)
}
// permanent blocks don't count
testBcachePut(t, fakeBlockID(4), config.BlockCache(), PermanentEntry)
}
func TestBcacheCheckPtrSuccess(t *testing.T) {
config := blockCacheTestInit(t, 100, 1<<30)
defer CheckConfigAndShutdown(t, config)
bcache := config.BlockCache()
block := NewFileBlock().(*FileBlock)
block.Contents = []byte{1, 2, 3, 4}
id := fakeBlockID(1)
ptr := BlockPointer{ID: id}
tlf := tlf.FakeID(1, false)
err := bcache.Put(ptr, tlf, block, TransientEntry)
if err != nil {
t.Errorf("Couldn't put block: %v", err)
}
checkedPtr, err := bcache.CheckForKnownPtr(tlf, block)
if err != nil {
t.Errorf("Unexpected error checking id: %v", err)
} else if checkedPtr != ptr {
t.Errorf("Unexpected pointer; got %v, expected %v", checkedPtr, id)
}
}
func TestBcacheCheckPtrPermanent(t *testing.T) {
config := blockCacheTestInit(t, 100, 1<<30)
defer config.Shutdown()
bcache := config.BlockCache()
block := NewFileBlock().(*FileBlock)
block.Contents = []byte{1, 2, 3, 4}
id := fakeBlockID(1)
ptr := BlockPointer{ID: id}
tlf := tlf.FakeID(1, false)
err := bcache.Put(ptr, tlf, block, PermanentEntry)
if err != nil {
t.Errorf("Couldn't put block: %v", err)
}
checkedPtr, err := bcache.CheckForKnownPtr(tlf, block)
if err != nil {
t.Errorf("Unexpected error checking id: %v", err)
} else if checkedPtr != (BlockPointer{}) {
t.Errorf("Unexpected non-zero pointer %v", checkedPtr)
}
}
func TestBcacheCheckPtrNotFound(t *testing.T) {
config := blockCacheTestInit(t, 100, 1<<30)
defer CheckConfigAndShutdown(t, config)
bcache := config.BlockCache()
block := NewFileBlock().(*FileBlock)
block.Contents = []byte{1, 2, 3, 4}
id := fakeBlockID(1)
ptr := BlockPointer{ID: id}
tlf := tlf.FakeID(1, false)
err := bcache.Put(ptr, tlf, block, TransientEntry)
if err != nil {
t.Errorf("Couldn't put block: %v", err)
}
block2 := NewFileBlock().(*FileBlock)
block2.Contents = []byte{4, 3, 2, 1}
checkedPtr, err := bcache.CheckForKnownPtr(tlf, block2)
if err != nil {
t.Errorf("Unexpected error checking id: %v", err)
} else if checkedPtr.IsInitialized() {
t.Errorf("Unexpected ID; got %v, expected null", checkedPtr)
}
}
func TestBcacheDeleteTransient(t *testing.T) {
config := blockCacheTestInit(t, 100, 1<<30)
defer CheckConfigAndShutdown(t, config)
bcache := config.BlockCache()
block := NewFileBlock().(*FileBlock)
block.Contents = []byte{1, 2, 3, 4}
id := fakeBlockID(1)
ptr := BlockPointer{ID: id}
tlf := tlf.FakeID(1, false)
err := bcache.Put(ptr, tlf, block, TransientEntry)
if err != nil {
t.Errorf("Couldn't put block: %v", err)
}
if err := bcache.DeleteTransient(ptr, tlf); err != nil {
t.Fatalf("Couldn't delete transient: %v", err)
}
// Make sure the pointer is gone from the hash cache too.
checkedPtr, err := bcache.CheckForKnownPtr(tlf, block)
if err != nil {
t.Errorf("Unexpected error checking id: %v", err)
} else if checkedPtr.IsInitialized() {
t.Errorf("Unexpected ID; got %v, expected null", checkedPtr)
}
}
func TestBcacheDeletePermanent(t *testing.T) {
config := blockCacheTestInit(t, 100, 1<<30)
defer CheckConfigAndShutdown(t, config)
bcache := config.BlockCache()
id1 := fakeBlockID(1)
testBcachePut(t, id1, bcache, PermanentEntry)
id2 := fakeBlockID(2)
block2 := NewFileBlock()
testBcachePutWithBlock(t, id2, bcache, TransientEntry, block2)
testBcachePutWithBlock(t, id2, bcache, PermanentEntry, block2)
bcache.DeletePermanent(id1)
bcache.DeletePermanent(id2)
testExpectedMissing(t, id1, bcache)
// 2 should still be there
if _, err := bcache.Get(BlockPointer{ID: id2}); err != nil {
t.Errorf("Got unexpected error on 2nd get: %v", err)
}
}
func TestBcacheEmptyTransient(t *testing.T) {
config := blockCacheTestInit(t, 0, 1<<30)
defer config.Shutdown()
bcache := config.BlockCache()
block := NewFileBlock()
id := fakeBlockID(1)
ptr := BlockPointer{ID: id}
tlf := tlf.FakeID(1, false)
// Make sure all the operations work even if the cache has no
// transient capacity.
if err := bcache.Put(ptr, tlf, block, TransientEntry); err != nil {
t.Errorf("Got error on Put for block %s: %v", id, err)
}
_, err := bcache.Get(ptr)
if _, ok := err.(NoSuchBlockError); !ok {
t.Errorf("Got unexpected error %v", err)
}
err = bcache.DeletePermanent(id)
if err != nil {
t.Errorf("Got unexpected error %v", err)
}
_, err = bcache.CheckForKnownPtr(tlf, block.(*FileBlock))
if err != nil {
t.Errorf("Got unexpected error %v", err)
}
}
func TestBcacheEvictOnBytes(t *testing.T) {
// Make a cache that can only handle 5 bytes
config := blockCacheTestInit(t, 1000, 5)
defer config.Shutdown()
bcache := config.BlockCache()
tlf := tlf.FakeID(1, false)
for i := byte(0); i < 8; i++ {
block := &FileBlock{
Contents: make([]byte, 1),
}
id := fakeBlockID(i)
ptr := BlockPointer{ID: id}
if err := bcache.Put(ptr, tlf, block, TransientEntry); err != nil {
t.Errorf("Got error on Put for block %s: %v", id, err)
}
}
// Only blocks 3 through 7 should be left
for i := byte(0); i < 3; i++ {
id := fakeBlockID(i)
testExpectedMissing(t, id, bcache)
}
for i := byte(3); i < 8; i++ {
id := fakeBlockID(i)
if _, err := bcache.Get(BlockPointer{ID: id}); err != nil {
t.Errorf("Got unexpected error on get: %v", err)
}
}
}
func TestBcacheEvictIncludesPermanentSize(t *testing.T) {
// Make a cache that can only handle 5 bytes
config := blockCacheTestInit(t, 1000, 5)
defer config.Shutdown()
bcache := config.BlockCache()
tlf := tlf.FakeID(1, false)
idPerm := fakeBlockID(0)
ptr := BlockPointer{ID: idPerm}
block := &FileBlock{
Contents: make([]byte, 2),
}
if err := bcache.Put(ptr, tlf, block, PermanentEntry); err != nil {
t.Errorf("Got error on Put for block %s: %v", idPerm, err)
}
for i := byte(1); i < 8; i++ {
block := &FileBlock{
Contents: make([]byte, 1),
}
id := fakeBlockID(i)
ptr := BlockPointer{ID: id}
if err := bcache.Put(ptr, tlf, block, TransientEntry); err != nil {
t.Errorf("Got error on Put for block %s: %v", id, err)
}
}
// The permanent block shouldn't be evicted
if _, err := bcache.Get(BlockPointer{ID: idPerm}); err != nil {
t.Errorf("Got unexpected error on get: %v", err)
}
// Only transient blocks 5 through 7 should be left
for i := byte(1); i < 5; i++ {
id := fakeBlockID(i)
testExpectedMissing(t, id, bcache)
}
for i := byte(5); i < 8; i++ {
id := fakeBlockID(i)
if _, err := bcache.Get(BlockPointer{ID: id}); err != nil {
t.Errorf("Got unexpected error on get: %v", err)
}
}
// Try putting in a block that's too big
block = &FileBlock{
CommonBlock: CommonBlock{IsInd: true},
}
block.SetEncodedSize(7)
id := fakeBlockID(8)
ptr = BlockPointer{ID: id}
if err := bcache.Put(ptr, tlf, block, TransientEntry); err != nil {
t.Errorf("Got error on Put for block %s: %v", id, err)
}
// All transient blocks should be gone (including the new one)
if _, err := bcache.Get(BlockPointer{ID: idPerm}); err != nil {
t.Errorf("Got unexpected error on get: %v", err)
}
// All transient blocks (1 through 8) should now be gone
for i := byte(1); i < 9; i++ {
id := fakeBlockID(i)
testExpectedMissing(t, id, bcache)
}
// Now try putting in a permanent block that exceeds capacity,
// which should always succeed.
idPerm2 := fakeBlockID(9)
ptr2 := BlockPointer{ID: idPerm2}
block2 := &FileBlock{
Contents: make([]byte, 10),
}
if err := bcache.Put(ptr2, tlf, block2, PermanentEntry); err != nil {
t.Errorf("Got error on Put for block %s: %v", idPerm, err)
}
if _, err := bcache.Get(BlockPointer{ID: idPerm}); err != nil {
t.Errorf("Got unexpected error on get: %v", err)
}
if _, err := bcache.Get(BlockPointer{ID: idPerm2}); err != nil {
t.Errorf("Got unexpected error on get: %v", err)
}
}
func TestPutNoHashCalculation(t *testing.T) {
config := blockCacheTestInit(t, 100, 1<<30)
defer CheckConfigAndShutdown(t, config)
bcache := config.BlockCache()
ptr := BlockPointer{ID: fakeBlockID(1)}
tlf := tlf.FakeID(1, false)
block := NewFileBlock().(*FileBlock)
block.Contents = []byte{1, 2, 3, 4}
// this is an invalid hash; if Put() does not calculate hash, it should go
// into the cache
block.hash = &kbfshash.RawDefaultHash{}
if err := bcache.Put(ptr, tlf, block, TransientEntry); err != nil {
t.Errorf("Got error on Put for block %s: %v", ptr.ID, err)
}
// CheckForKnownPtr() calculates hash, which results in a valid hash at
// block.hash. If the block with invalid hash was put into cache, this should
// fail to find the block.
checkedPtr, err := bcache.CheckForKnownPtr(tlf, block)
if err != nil {
t.Errorf("Unexpected error checking id: %v", err)
} else if checkedPtr == ptr {
t.Errorf("Put() is calculating hash")
}
}
| 1 | 15,017 | Does this need its own import block or can it be combined with the imports below as in most other files? | keybase-kbfs | go |
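A minimal sketch of the grouping the reviewer asks about: `golang.org/x/net/context` folded into the existing third-party block instead of getting its own block. How kbfs actually groups imports is an assumption on my part, and the blank-identifier uses are only there so the snippet compiles standalone; the real test file uses these packages directly.
package libkbfs
import (
"testing"
"github.com/keybase/kbfs/kbfshash"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
)
// Blank uses keep this standalone sketch compiling; the actual
// test file references these packages in its test functions.
var (
_ = testing.Short
_ = context.Background
_ = kbfshash.RawDefaultHash{}
_ = tlf.FakeID
)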
@@ -34,6 +34,12 @@ _registered_options = {
# For example, this value determines whether the repr() for a dataframe prints out fully or
# just a truncated repr.
"display.max_rows": 1000, # TODO: None should support unlimited.
+
+    # This determines whether or not to allow operations between two different dataframes.
+    # For example, the 'combine_frames' function internally performs a join operation which can be
+    # expensive in general.
+    # So, if the `compute.ops_on_diff_frames` option is not True, such methods throw an exception.
+ "compute.ops_on_diff_frames": True,
} # type: Dict[str, Any]
| 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Infrastructure of configuration for Koalas.
"""
import json
from typing import Dict, Union, Any
from pyspark._globals import _NoValue, _NoValueType
from databricks.koalas.utils import default_session
__all__ = ['get_option', 'set_option', 'reset_option']
# dict to store registered options and their default values (key -> default).
_registered_options = {
# This sets the maximum number of rows koalas should output when printing out various output.
# For example, this value determines whether the repr() for a dataframe prints out fully or
# just a truncated repr.
"display.max_rows": 1000, # TODO: None should support unlimited.
} # type: Dict[str, Any]
_key_format = 'koalas.{}'.format
class OptionError(AttributeError, KeyError):
pass
def get_option(key: str, default: Union[str, _NoValueType] = _NoValue) -> Any:
"""
Retrieves the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
default : object
The default value if the option is not set yet. The value should be JSON serializable.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists and the default is not provided
"""
_check_option(key, default)
if default is _NoValue:
default = _registered_options[key]
return json.loads(default_session().conf.get(_key_format(key), default=json.dumps(default)))
def set_option(key: str, value: Any) -> None:
"""
Sets the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
value : object
New value of option. The value should be JSON serializable.
Returns
-------
None
"""
_check_option(key, value)
default_session().conf.set(_key_format(key), json.dumps(value))
def reset_option(key: str) -> None:
"""
Reset one option to its default value.
Pass "all" as argument to reset all options.
Parameters
----------
key : str
If specified only option will be reset.
Returns
-------
None
"""
_check_option(key)
default_session().conf.unset(_key_format(key))
def _check_option(key: str, value: Union[str, _NoValueType] = _NoValue) -> None:
if key not in _registered_options:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_registered_options.keys()))))
if value is None:
return # None is allowed for all types.
if value is not _NoValue and not isinstance(value, type(_registered_options[key])):
raise TypeError("The configuration value for '%s' was %s; however, %s is expected." % (
key, type(value), type(_registered_options[key])))
| 1 | 11,259 | It defaults to `false`. | databricks-koalas | py |
@@ -7,8 +7,12 @@ type corsHandler struct {
}
func (wrapper corsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
- resp.Header().Set("Access-Control-Allow-Origin", "*")
- resp.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
+ if isPreflightCorsRequest(req) {
+ applyPreflightCorsResponse(resp)
+ return
+ }
+
+ allowAllCorsActions(resp)
wrapper.originalHandler.ServeHTTP(resp, req)
}
| 1 | package tequilapi
import "net/http"
type corsHandler struct {
originalHandler http.Handler
}
func (wrapper corsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
resp.Header().Set("Access-Control-Allow-Origin", "*")
resp.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
wrapper.originalHandler.ServeHTTP(resp, req)
}
//ApplyCors wraps original handler by adding cors headers to response BEFORE original ServeHTTP method is called
func ApplyCors(original http.Handler) http.Handler {
return corsHandler{original}
}
| 1 | 10,175 | Why is the conditional check done? As I understand it, both `applyPreflightCorsResponse()` and `allowAllCorsActions()` later do the same thing | mysteriumnetwork-node | go |
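A hedged sketch of what the split in the patch above might look like. The helper names come from the diff, but their bodies are not shown in this row, so the logic below is assumed. It also suggests an answer to the reviewer's question: the two helpers can set similar headers, yet the preflight branch additionally answers the Access-Control-Request-* exchange and short-circuits, so the wrapped handler never runs for OPTIONS preflights.
package tequilapi
import "net/http"
// isPreflightCorsRequest is assumed to detect a CORS preflight:
// an OPTIONS request carrying Access-Control-Request-Method.
func isPreflightCorsRequest(req *http.Request) bool {
return req.Method == http.MethodOptions &&
req.Header.Get("Access-Control-Request-Method") != ""
}
// applyPreflightCorsResponse is assumed to answer the preflight
// directly, without invoking the wrapped handler.
func applyPreflightCorsResponse(resp http.ResponseWriter) {
resp.Header().Set("Access-Control-Allow-Origin", "*")
resp.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
resp.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
resp.WriteHeader(http.StatusOK)
}
// allowAllCorsActions keeps the original permissive headers for
// non-preflight requests before delegating to the real handler.
func allowAllCorsActions(resp http.ResponseWriter) {
resp.Header().Set("Access-Control-Allow-Origin", "*")
resp.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
}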
@@ -142,6 +142,9 @@ func (mh *MessageHandle) OnRegister(connection conn.Connection) {
if _, ok := mh.KeepaliveChannel[nodeID]; !ok {
mh.KeepaliveChannel[nodeID] = make(chan struct{}, 1)
+ } else {
+ klog.Warningf("Node %v/%v has not yet exit, OnRegister failed", projectID, nodeID)
+ return
}
io := &hubio.JSONIO{Connection: connection} | 1 | package handler
import (
"encoding/json"
"fmt"
"regexp"
"strings"
"sync"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
beehiveModel "github.com/kubeedge/beehive/pkg/core/model"
"github.com/kubeedge/kubeedge/cloud/pkg/apis/reliablesyncs/v1alpha1"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/channelq"
hubio "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/io"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
hubconfig "github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/config"
deviceconst "github.com/kubeedge/kubeedge/cloud/pkg/devicecontroller/constants"
edgeconst "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/constants"
edgemessagelayer "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/messagelayer"
"github.com/kubeedge/kubeedge/cloud/pkg/synccontroller"
"github.com/kubeedge/kubeedge/common/constants"
"github.com/kubeedge/viaduct/pkg/conn"
"github.com/kubeedge/viaduct/pkg/mux"
)
// ExitCode exit code
type ExitCode int
const (
hubioReadFail ExitCode = iota
hubioWriteFail
messageQueueDisconnect
nodeStop
nodeDisconnect
)
// constants for error message
const (
MsgFormatError = "message format not correct"
VolumePattern = `^\w[-\w.+]*/` + constants.CSIResourceTypeVolume + `/\w[-\w.+]*`
)
// VolumeRegExp is used to validate the volume resource
var VolumeRegExp = regexp.MustCompile(VolumePattern)
// MessageHandle processes messages between cloud and edge
type MessageHandle struct {
KeepaliveInterval int
WriteTimeout int
Nodes sync.Map
nodeConns sync.Map
nodeLocks sync.Map
MessageQueue *channelq.ChannelMessageQueue
Handlers []HandleFunc
NodeLimit int
KeepaliveChannel map[string]chan struct{}
MessageAcks sync.Map
}
type HandleFunc func(hi hubio.CloudHubIO, info *model.HubInfo, exitServe chan ExitCode, stopSendMsg chan struct{})
var once sync.Once
// CloudhubHandler the shared handler for both websocket and quic servers
var CloudhubHandler *MessageHandle
// InitHandler create a handler for websocket and quic servers
func InitHandler(eventq *channelq.ChannelMessageQueue) {
once.Do(func() {
CloudhubHandler = &MessageHandle{
KeepaliveInterval: int(hubconfig.Config.KeepaliveInterval),
WriteTimeout: int(hubconfig.Config.WriteTimeout),
MessageQueue: eventq,
NodeLimit: int(hubconfig.Config.NodeLimit),
}
CloudhubHandler.KeepaliveChannel = make(map[string]chan struct{})
CloudhubHandler.Handlers = []HandleFunc{
CloudhubHandler.KeepaliveCheckLoop,
CloudhubHandler.MessageWriteLoop,
CloudhubHandler.ListMessageWriteLoop,
}
CloudhubHandler.initServerEntries()
})
}
// initServerEntries register handler func
func (mh *MessageHandle) initServerEntries() {
mux.Entry(mux.NewPattern("*").Op("*"), mh.HandleServer)
}
// HandleServer handle all the request from node
func (mh *MessageHandle) HandleServer(container *mux.MessageContainer, writer mux.ResponseWriter) {
nodeID := container.Header.Get("node_id")
projectID := container.Header.Get("project_id")
if mh.GetNodeCount() >= mh.NodeLimit {
klog.Errorf("Fail to serve node %s, reach node limit", nodeID)
return
}
if container.Message.GetOperation() == model.OpKeepalive {
klog.Infof("Keepalive message received from node: %s", nodeID)
mh.KeepaliveChannel[nodeID] <- struct{}{}
return
}
// handle the response from edge
if VolumeRegExp.MatchString(container.Message.GetResource()) {
beehiveContext.SendResp(*container.Message)
return
}
// handle the ack message from edge
if container.Message.Router.Operation == beehiveModel.ResponseOperation {
if ackChan, ok := mh.MessageAcks.Load(container.Message.Header.ParentID); ok {
close(ackChan.(chan struct{}))
mh.MessageAcks.Delete(container.Message.Header.ParentID)
}
return
}
err := mh.PubToController(&model.HubInfo{ProjectID: projectID, NodeID: nodeID}, container.Message)
if err != nil {
// if err, we should stop the node, write data to edgehub, and stop notifying
klog.Errorf("Failed to serve handle with error: %s", err.Error())
}
}
// OnRegister register node on first connection
func (mh *MessageHandle) OnRegister(connection conn.Connection) {
nodeID := connection.ConnectionState().Headers.Get("node_id")
projectID := connection.ConnectionState().Headers.Get("project_id")
if _, ok := mh.KeepaliveChannel[nodeID]; !ok {
mh.KeepaliveChannel[nodeID] = make(chan struct{}, 1)
}
io := &hubio.JSONIO{Connection: connection}
go mh.ServeConn(io, &model.HubInfo{ProjectID: projectID, NodeID: nodeID})
}
// KeepaliveCheckLoop checks whether the edge node is still alive
func (mh *MessageHandle) KeepaliveCheckLoop(hi hubio.CloudHubIO, info *model.HubInfo, stopServe chan ExitCode, stopSendMsg chan struct{}) {
keepaliveTicker := time.NewTimer(time.Duration(mh.KeepaliveInterval) * time.Second)
for {
select {
case _, ok := <-mh.KeepaliveChannel[info.NodeID]:
if !ok {
return
}
klog.Infof("Node %s is still alive", info.NodeID)
keepaliveTicker.Reset(time.Duration(mh.KeepaliveInterval) * time.Second)
case <-keepaliveTicker.C:
klog.Warningf("Timeout to receive heart beat from edge node %s for project %s",
info.NodeID, info.ProjectID)
stopServe <- nodeDisconnect
close(stopSendMsg)
return
}
}
}
func dumpMessageMetadata(msg *beehiveModel.Message) string {
return fmt.Sprintf("id: %s, parent_id: %s, group: %s, source: %s, resource: %s, operation: %s",
msg.Header.ID, msg.Header.ParentID, msg.Router.Group, msg.Router.Source, msg.Router.Resource, msg.Router.Operation)
}
func trimMessage(msg *beehiveModel.Message) {
resource := msg.GetResource()
if strings.HasPrefix(resource, model.ResNode) {
tokens := strings.Split(resource, "/")
if len(tokens) < 3 {
klog.Warningf("event resource %s starts with node but length less than 3", resource)
} else {
msg.SetResourceOperation(strings.Join(tokens[2:], "/"), msg.GetOperation())
}
}
}
func notifyEventQueueError(hi hubio.CloudHubIO, code ExitCode, nodeID string) {
if code == messageQueueDisconnect {
msg := beehiveModel.NewMessage("").BuildRouter(model.GpResource, model.SrcCloudHub, model.NewResource(model.ResNode, nodeID, nil), model.OpDisConnect)
err := hi.WriteData(msg)
if err != nil {
klog.Errorf("fail to notify node %s event queue disconnected, reason: %s", nodeID, err.Error())
}
}
}
func constructConnectMessage(info *model.HubInfo, isConnected bool) *beehiveModel.Message {
connected := model.OpConnect
if !isConnected {
connected = model.OpDisConnect
}
body := map[string]interface{}{
"event_type": connected,
"timestamp": time.Now().Unix(),
"client_id": info.NodeID}
content, _ := json.Marshal(body)
msg := beehiveModel.NewMessage("")
msg.BuildRouter(model.SrcCloudHub, model.GpResource, model.NewResource(model.ResNode, info.NodeID, nil), connected)
msg.FillBody(content)
return msg
}
func (mh *MessageHandle) PubToController(info *model.HubInfo, msg *beehiveModel.Message) error {
msg.SetResourceOperation(fmt.Sprintf("node/%s/%s", info.NodeID, msg.GetResource()), msg.GetOperation())
klog.Infof("event received for node %s %s, content: %s", info.NodeID, dumpMessageMetadata(msg), msg.Content)
if model.IsFromEdge(msg) {
err := mh.MessageQueue.Publish(msg)
if err != nil {
// content is not logged since it may contain sensitive information
klog.Errorf("fail to publish event for node %s, %s, reason: %s",
info.NodeID, dumpMessageMetadata(msg), err.Error())
return err
}
}
return nil
}
func (mh *MessageHandle) hubIoWrite(hi hubio.CloudHubIO, nodeID string, msg *beehiveModel.Message) error {
value, ok := mh.nodeLocks.Load(nodeID)
if !ok {
return fmt.Errorf("node disconnected")
}
mutex := value.(*sync.Mutex)
mutex.Lock()
defer mutex.Unlock()
return hi.WriteData(msg)
}
// ServeConn starts serving the incoming connection
func (mh *MessageHandle) ServeConn(hi hubio.CloudHubIO, info *model.HubInfo) {
err := mh.RegisterNode(hi, info)
if err != nil {
klog.Errorf("fail to register node %s, reason %s", info.NodeID, err.Error())
return
}
klog.Infof("edge node %s for project %s connected", info.NodeID, info.ProjectID)
exitServe := make(chan ExitCode, 3)
stopSendMsg := make(chan struct{})
for _, handle := range mh.Handlers {
go handle(hi, info, exitServe, stopSendMsg)
}
code := <-exitServe
mh.UnregisterNode(hi, info, code)
}
// RegisterNode register node in cloudhub for the incoming connection
func (mh *MessageHandle) RegisterNode(hi hubio.CloudHubIO, info *model.HubInfo) error {
mh.MessageQueue.Connect(info)
err := mh.MessageQueue.Publish(constructConnectMessage(info, true))
if err != nil {
klog.Errorf("fail to publish node connect event for node %s, reason %s", info.NodeID, err.Error())
notifyEventQueueError(hi, messageQueueDisconnect, info.NodeID)
err = hi.Close()
if err != nil {
klog.Errorf("fail to close connection, reason: %s", err.Error())
}
return err
}
mh.nodeConns.Store(info.NodeID, hi)
mh.nodeLocks.Store(info.NodeID, &sync.Mutex{})
mh.Nodes.Store(info.NodeID, true)
return nil
}
// UnregisterNode unregister node in cloudhub
func (mh *MessageHandle) UnregisterNode(hi hubio.CloudHubIO, info *model.HubInfo, code ExitCode) {
mh.nodeLocks.Delete(info.NodeID)
mh.nodeConns.Delete(info.NodeID)
close(mh.KeepaliveChannel[info.NodeID])
delete(mh.KeepaliveChannel, info.NodeID)
err := mh.MessageQueue.Publish(constructConnectMessage(info, false))
if err != nil {
klog.Errorf("fail to publish node disconnect event for node %s, reason %s", info.NodeID, err.Error())
}
notifyEventQueueError(hi, code, info.NodeID)
mh.Nodes.Delete(info.NodeID)
err = hi.Close()
if err != nil {
klog.Errorf("fail to close connection, reason: %s", err.Error())
}
// delete the nodeQueue and nodeStore when node stopped
if code == nodeStop {
mh.MessageQueue.Close(info)
}
}
// GetNodeCount returns the number of connected Nodes
func (mh *MessageHandle) GetNodeCount() int {
var num int
iter := func(key, value interface{}) bool {
num++
return true
}
mh.Nodes.Range(iter)
return num
}
// ListMessageWriteLoop processes all list type resource write requests
func (mh *MessageHandle) ListMessageWriteLoop(hi hubio.CloudHubIO, info *model.HubInfo, stopServe chan ExitCode, stopSendMsg chan struct{}) {
nodeListQueue, err := mh.MessageQueue.GetNodeListQueue(info.NodeID)
if err != nil {
klog.Errorf("Failed to get nodeQueue for node %s: %v", info.NodeID, err)
stopServe <- messageQueueDisconnect
return
}
nodeListStore, err := mh.MessageQueue.GetNodeListStore(info.NodeID)
if err != nil {
klog.Errorf("Failed to get nodeStore for node %s: %v", info.NodeID, err)
stopServe <- messageQueueDisconnect
return
}
for {
select {
case <-stopSendMsg:
klog.Errorf("Node %s disconnected and stopped sending messages", info.NodeID)
return
default:
mh.handleMessage(nodeListQueue, nodeListStore, hi, info, stopServe, "listMessage")
}
}
}
// MessageWriteLoop processes all write requests
func (mh *MessageHandle) MessageWriteLoop(hi hubio.CloudHubIO, info *model.HubInfo, stopServe chan ExitCode, stopSendMsg chan struct{}) {
nodeQueue, err := mh.MessageQueue.GetNodeQueue(info.NodeID)
if err != nil {
klog.Errorf("Failed to get nodeQueue for node %s: %v", info.NodeID, err)
stopServe <- messageQueueDisconnect
return
}
nodeStore, err := mh.MessageQueue.GetNodeStore(info.NodeID)
if err != nil {
klog.Errorf("Failed to get nodeStore for node %s: %v", info.NodeID, err)
stopServe <- messageQueueDisconnect
return
}
for {
select {
case <-stopSendMsg:
klog.Errorf("Node %s disconnected and stopped sending messages", info.NodeID)
return
default:
mh.handleMessage(nodeQueue, nodeStore, hi, info, stopServe, "message")
}
}
}
func (mh *MessageHandle) handleMessage(nodeQueue workqueue.RateLimitingInterface,
nodeStore cache.Store, hi hubio.CloudHubIO,
info *model.HubInfo, stopServe chan ExitCode, msgType string) {
key, quit := nodeQueue.Get()
if quit {
klog.Errorf("nodeQueue for node %s has shutdown", info.NodeID)
return
}
obj, exist, _ := nodeStore.GetByKey(key.(string))
if !exist {
klog.Errorf("nodeStore for node %s doesn't exist", info.NodeID)
return
}
msg := obj.(*beehiveModel.Message)
if model.IsNodeStopped(msg) {
klog.Infof("node %s is stopped, will disconnect", info.NodeID)
stopServe <- nodeStop
return
}
if !model.IsToEdge(msg) {
klog.Infof("skip only to cloud event for node %s, %s, content %s", info.NodeID, dumpMessageMetadata(msg), msg.Content)
return
}
klog.V(4).Infof("event to send for node %s, %s, content %s", info.NodeID, dumpMessageMetadata(msg), msg.Content)
copyMsg := deepcopy(msg)
trimMessage(msg)
err := hi.SetWriteDeadline(time.Now().Add(time.Duration(mh.WriteTimeout) * time.Second))
if err != nil {
klog.Errorf("SetWriteDeadline error, %s", err.Error())
stopServe <- hubioWriteFail
return
}
if msgType == "listMessage" {
mh.send(hi, info, msg)
// delete successfully sent events from the queue/store
nodeStore.Delete(msg)
} else {
mh.sendMsg(hi, info, msg, copyMsg, nodeStore)
}
nodeQueue.Done(key)
}
func (mh *MessageHandle) sendMsg(hi hubio.CloudHubIO, info *model.HubInfo, msg, copyMsg *beehiveModel.Message, nodeStore cache.Store) {
ackChan := make(chan struct{})
mh.MessageAcks.Store(msg.GetID(), ackChan)
// initialize timer and retry count for sending message
var (
retry = 0
retryInterval time.Duration = 5
)
ticker := time.NewTimer(retryInterval * time.Second)
mh.send(hi, info, msg)
LOOP:
for {
select {
case <-ackChan:
mh.saveSuccessPoint(copyMsg, info, nodeStore)
break LOOP
case <-ticker.C:
if retry == 4 {
break LOOP
}
mh.send(hi, info, msg)
retry++
ticker.Reset(time.Second * retryInterval)
}
}
}
func (mh *MessageHandle) send(hi hubio.CloudHubIO, info *model.HubInfo, msg *beehiveModel.Message) {
err := mh.hubIoWrite(hi, info.NodeID, msg)
if err != nil {
klog.Errorf("write error, connection for node %s will be closed, affected event %s, reason %s",
info.NodeID, dumpMessageMetadata(msg), err.Error())
return
}
}
func (mh *MessageHandle) saveSuccessPoint(msg *beehiveModel.Message, info *model.HubInfo, nodeStore cache.Store) {
if msg.GetGroup() == edgeconst.GroupResource {
resourceNamespace, _ := edgemessagelayer.GetNamespace(*msg)
resourceName, _ := edgemessagelayer.GetResourceName(*msg)
resourceType, _ := edgemessagelayer.GetResourceType(*msg)
resourceUID, err := channelq.GetMessageUID(*msg)
if err != nil {
return
}
objectSyncName := synccontroller.BuildObjectSyncName(info.NodeID, resourceUID)
if msg.GetOperation() == beehiveModel.DeleteOperation {
nodeStore.Delete(msg)
mh.deleteSuccessPoint(resourceNamespace, objectSyncName)
return
}
objectSync, err := mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).Get(objectSyncName, metav1.GetOptions{})
if err == nil {
objectSync.Status.ObjectResourceVersion = msg.GetResourceVersion()
_, err := mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).UpdateStatus(objectSync)
if err != nil {
klog.Errorf("Failed to update objectSync: %v, resourceType: %s, resourceNamespace: %s, resourceName: %s",
err, resourceType, resourceNamespace, resourceName)
}
} else if err != nil && apierrors.IsNotFound(err) {
objectSync := &v1alpha1.ObjectSync{
ObjectMeta: metav1.ObjectMeta{
Name: objectSyncName,
},
Spec: v1alpha1.ObjectSyncSpec{
ObjectAPIVersion: "",
ObjectKind: resourceType,
ObjectName: resourceName,
},
}
_, err := mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).Create(objectSync)
if err != nil {
klog.Errorf("Failed to create objectSync: %s, err: %v", objectSyncName, err)
return
}
objectSyncStatus, err := mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).Get(objectSyncName, metav1.GetOptions{})
if err != nil {
klog.Errorf("Failed to get objectSync: %s, err: %v", objectSyncName, err)
}
objectSyncStatus.Status.ObjectResourceVersion = msg.GetResourceVersion()
mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).UpdateStatus(objectSyncStatus)
}
}
// TODO: save device info
if msg.GetGroup() == deviceconst.GroupTwin {
}
klog.Infof("saveSuccessPoint successfully for message: %s", msg.GetResource())
}
func (mh *MessageHandle) deleteSuccessPoint(resourceNamespace, objectSyncName string) {
mh.MessageQueue.ObjectSyncController.CrdClient.ReliablesyncsV1alpha1().ObjectSyncs(resourceNamespace).Delete(objectSyncName, metav1.NewDeleteOptions(0))
}
func deepcopy(msg *beehiveModel.Message) *beehiveModel.Message {
if msg == nil {
return nil
}
out := new(beehiveModel.Message)
out.Header = msg.Header
out.Router = msg.Router
out.Content = msg.Content
return out
}
| 1 | 16,359 | has not yet exit? | kubeedge-kubeedge | go |
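One more note on the kubeedge row above: the new early-return in OnRegister reads from the plain map KeepaliveChannel, which HandleServer and UnregisterNode also touch from other goroutines. Below is a hedged sketch of a lock-guarded version of that check; the mutex and the helper are hypothetical — the diff only adds the early-return, not any locking.
package handler
import "sync"
// registry is a stand-in for MessageHandle's keepalive bookkeeping.
// The mutex is an assumption, not something the patch introduces.
type registry struct {
mu       sync.Mutex
channels map[string]chan struct{}
}
// tryRegister reports whether the node was newly registered. A false
// result mirrors the patch's "node has not yet exited" branch.
func (r *registry) tryRegister(nodeID string) bool {
r.mu.Lock()
defer r.mu.Unlock()
if _, ok := r.channels[nodeID]; ok {
return false // previous session still active; refuse re-registration
}
r.channels[nodeID] = make(chan struct{}, 1)
return true
}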
@@ -247,6 +247,11 @@ _rt_hemuother_per_user_known = {
class RadioTap(Packet):
name = "RadioTap dummy"
+ deprecated_fields = {
+ "Channel": "ChannelFrequency", # 2.4.3
+ "ChannelFlags2": "ChannelPlusFlags", # 2.4.3
+ "ChannelNumber": "ChannelPlusNumber" # 2.4.3
+ }
fields_desc = [
ByteField('version', 0),
ByteField('pad', 0), | 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# Copyright (C) Philippe Biondi <[email protected]>
"""
Wireless LAN according to IEEE 802.11.
"""
from __future__ import print_function
import math
import re
import struct
from zlib import crc32
from scapy.config import conf, crypto_validator
from scapy.data import ETHER_ANY, DLT_IEEE802_11, DLT_PRISM_HEADER, \
DLT_IEEE802_11_RADIO
from scapy.compat import raw, plain_str, orb, chb
from scapy.packet import Packet, bind_layers, bind_top_down, NoPayload
from scapy.fields import ByteField, LEShortField, BitField, LEShortEnumField, \
ByteEnumField, X3BytesField, FlagsField, LELongField, StrField, \
StrLenField, IntField, XByteField, LEIntField, StrFixedLenField, \
LESignedIntField, ReversePadField, ConditionalField, PacketListField, \
ShortField, BitEnumField, FieldLenField, LEFieldLenField, \
FieldListField, XStrFixedLenField, PacketField, FCSField
from scapy.ansmachine import AnsweringMachine
from scapy.plist import PacketList
from scapy.layers.l2 import Ether, LLC, MACField
from scapy.layers.inet import IP, TCP
from scapy.error import warning, log_loading
from scapy.sendrecv import sniff, sendp
from scapy.utils import issubtype
if conf.crypto_valid:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms
else:
default_backend = Ciphers = algorithms = None
log_loading.info("Can't import python-cryptography v1.7+. Disabled WEP decryption/encryption. (Dot11)") # noqa: E501
# Layers
class PrismHeader(Packet):
""" iwpriv wlan0 monitor 3 """
name = "Prism header"
fields_desc = [LEIntField("msgcode", 68),
LEIntField("len", 144),
StrFixedLenField("dev", "", 16),
LEIntField("hosttime_did", 0),
LEShortField("hosttime_status", 0),
LEShortField("hosttime_len", 0),
LEIntField("hosttime", 0),
LEIntField("mactime_did", 0),
LEShortField("mactime_status", 0),
LEShortField("mactime_len", 0),
LEIntField("mactime", 0),
LEIntField("channel_did", 0),
LEShortField("channel_status", 0),
LEShortField("channel_len", 0),
LEIntField("channel", 0),
LEIntField("rssi_did", 0),
LEShortField("rssi_status", 0),
LEShortField("rssi_len", 0),
LEIntField("rssi", 0),
LEIntField("sq_did", 0),
LEShortField("sq_status", 0),
LEShortField("sq_len", 0),
LEIntField("sq", 0),
LEIntField("signal_did", 0),
LEShortField("signal_status", 0),
LEShortField("signal_len", 0),
LESignedIntField("signal", 0),
LEIntField("noise_did", 0),
LEShortField("noise_status", 0),
LEShortField("noise_len", 0),
LEIntField("noise", 0),
LEIntField("rate_did", 0),
LEShortField("rate_status", 0),
LEShortField("rate_len", 0),
LEIntField("rate", 0),
LEIntField("istx_did", 0),
LEShortField("istx_status", 0),
LEShortField("istx_len", 0),
LEIntField("istx", 0),
LEIntField("frmlen_did", 0),
LEShortField("frmlen_status", 0),
LEShortField("frmlen_len", 0),
LEIntField("frmlen", 0),
]
def answers(self, other):
if isinstance(other, PrismHeader):
return self.payload.answers(other.payload)
else:
return self.payload.answers(other)
# RadioTap
class _RadiotapReversePadField(ReversePadField):
def __init__(self, fld):
self._fld = fld
self._padwith = b"\x00"
# Quote from https://www.radiotap.org/:
# ""Radiotap requires that all fields in the radiotap header are aligned to natural boundaries. # noqa: E501
# For radiotap, that means all 8-, 16-, 32-, and 64-bit fields must begin on 8-, 16-, 32-, and 64-bit boundaries, respectively."" # noqa: E501
if isinstance(self._fld, BitField):
self._align = int(math.ceil(self.i2len(None, None)))
else:
self._align = struct.calcsize(self._fld.fmt)
class _dbmField(ByteField):
def i2m(self, pkt, x):
return super(ByteField, self).i2m(pkt, x + 256)
def m2i(self, pkt, x):
return super(ByteField, self).m2i(pkt, x) - 256
def i2repr(self, pkt, x):
return "%sdBm" % x
def _next_radiotap_extpm(pkt, lst, cur, s):
"""Generates the next RadioTapExtendedPresenceMask"""
if cur is None or (cur.present and cur.present.Ext):
st = len(lst) + (cur is not None)
return lambda *args: RadioTapExtendedPresenceMask(*args, index=st)
return None
class RadioTapExtendedPresenceMask(Packet):
"""RadioTapExtendedPresenceMask should be instantiated by passing an
`index=` kwarg, stating which place the item has in the list.
Passing index will update the b[x] fields accordingly to the index.
e.g.
>>> a = RadioTapExtendedPresenceMask(present="b0+b12+b29+Ext")
>>> b = RadioTapExtendedPresenceMask(index=1, present="b33+b45+b59+b62")
>>> pkt = RadioTap(present="Ext", Ext=[a, b])
"""
name = "RadioTap Extended presence mask"
fields_desc = [FlagsField('present', None, -32,
["b%s" % i for i in range(0, 31)] + ["Ext"])]
def __init__(self, _pkt=None, index=0, **kwargs):
self._restart_indentation(index)
Packet.__init__(self, _pkt, **kwargs)
def _restart_indentation(self, index):
st = index * 32
self.fields_desc[0].names = ["b%s" % (i + st) for i in range(0, 31)] + ["Ext"] # noqa: E501
def guess_payload_class(self, pay):
return conf.padding_layer
# RadioTap constants
_rt_present = ['TSFT', 'Flags', 'Rate', 'Channel', 'FHSS', 'dBm_AntSignal',
'dBm_AntNoise', 'Lock_Quality', 'TX_Attenuation',
'dB_TX_Attenuation', 'dBm_TX_Power', 'Antenna',
'dB_AntSignal', 'dB_AntNoise', 'RXFlags', 'TXFlags',
'b17', 'b18', 'ChannelPlus', 'MCS', 'A_MPDU',
'VHT', 'timestamp', 'HE', 'HE_MU', 'HE_MU_other_user',
'zero_length_psdu', 'L_SIG', 'b28',
'RadiotapNS', 'VendorNS', 'Ext']
# Note: Inconsistencies with wireshark
# Wireshark ignores the suggested fields, whereas we implement some of them
# (some are well-used even though not accepted)
# However, flags that conflict with Wireshark are not and MUST NOT be
# implemented -> b17, b18
_rt_flags = ['CFP', 'ShortPreamble', 'wep', 'fragment', 'FCS', 'pad',
'badFCS', 'ShortGI']
_rt_channelflags = ['res1', 'res2', 'res3', 'res4', 'Turbo', 'CCK',
'OFDM', '2GHz', '5GHz', 'Passive', 'Dynamic_CCK_OFDM',
'GFSK', 'GSM', 'StaticTurbo', '10MHz', '5MHz']
_rt_rxflags = ["res1", "BAD_PLCP", "res2"]
_rt_txflags = ["TX_FAIL", "CTS", "RTS", "NOACK", "NOSEQ"]
_rt_channelflags2 = ['res1', 'res2', 'res3', 'res4', 'Turbo', 'CCK',
'OFDM', '2GHz', '5GHz', 'Passive', 'Dynamic_CCK_OFDM',
'GFSK', 'GSM', 'StaticTurbo', '10MHz', '5MHz',
'20MHz', '40MHz_ext_channel_above',
'40MHz_ext_channel_below',
'res5', 'res6', 'res7', 'res8', 'res9']
_rt_knownmcs = ['MCS_bandwidth', 'MCS_index', 'guard_interval', 'HT_format',
'FEC_type', 'STBC_streams', 'Ness', 'Ness_MSB']
_rt_bandwidth = {0: "20MHz", 1: "40MHz", 2: "ht40Mhz-", 3: "ht40MHz+"}
_rt_a_mpdu_flags = ['Report0Subframe', 'Is0Subframe', 'KnownLastSubframe',
'LastSubframe', 'CRCerror', 'EOFsubframe', 'KnownEOF',
'res1', 'res2', 'res3', 'res4', 'res5', 'res6', 'res7',
'res8']
_rt_vhtbandwidth = {
0: "20MHz", 1: "40MHz", 2: "40MHz", 3: "40MHz", 4: "80MHz", 5: "80MHz",
6: "80MHz", 7: "80MHz", 8: "80MHz", 9: "80MHz", 10: "80MHz", 11: "160MHz",
12: "160MHz", 13: "160MHz", 14: "160MHz", 15: "160MHz", 16: "160MHz",
17: "160MHz", 18: "160MHz", 19: "160MHz", 20: "160MHz", 21: "160MHz",
22: "160MHz", 23: "160MHz", 24: "160MHz", 25: "160MHz"
}
_rt_knownvht = ['STBC', 'TXOP_PS_NOT_ALLOWED', 'GuardInterval', 'SGINsysmDis',
'LDPCextraOFDM', 'Beamformed', 'Bandwidth', 'GroupID',
'PartialAID',
'res1', 'res2', 'res3', 'res4', 'res5', 'res6', 'res7']
_rt_presentvht = ['STBC', 'TXOP_PS_NOT_ALLOWED', 'GuardInterval',
'SGINsysmDis', 'LDPCextraOFDM', 'Beamformed',
'res1', 'res2']
_rt_hemuother_per_user_known = {
'user field position',
'STA-ID',
'NSTS',
'Tx Beamforming',
'Spatial Configuration',
'MCS',
'DCM',
'Coding',
}
class RadioTap(Packet):
name = "RadioTap dummy"
fields_desc = [
ByteField('version', 0),
ByteField('pad', 0),
LEShortField('len', None),
FlagsField('present', None, -32, _rt_present), # noqa: E501
# Extended presence mask
ConditionalField(PacketListField("Ext", [], next_cls_cb=_next_radiotap_extpm), lambda pkt: pkt.present and pkt.present.Ext), # noqa: E501
# RadioTap fields - each starts with a _RadiotapReversePadField
# to handle padding
# TSFT
ConditionalField(
_RadiotapReversePadField(
LELongField("mac_timestamp", 0)
),
lambda pkt: pkt.present and pkt.present.TSFT),
# Flags
ConditionalField(
_RadiotapReversePadField(
FlagsField("Flags", None, -8, _rt_flags)
),
lambda pkt: pkt.present and pkt.present.Flags),
# Rate
ConditionalField(
_RadiotapReversePadField(
ByteField("Rate", 0)
),
lambda pkt: pkt.present and pkt.present.Rate),
# Channel
ConditionalField(
_RadiotapReversePadField(
LEShortField("Channel", 0)
),
lambda pkt: pkt.present and pkt.present.Channel),
ConditionalField(
FlagsField("ChannelFlags", None, -16, _rt_channelflags),
lambda pkt: pkt.present and pkt.present.Channel),
# dBm_AntSignal
ConditionalField(
_RadiotapReversePadField(
_dbmField("dBm_AntSignal", -256)
),
lambda pkt: pkt.present and pkt.present.dBm_AntSignal),
# dBm_AntNoise
ConditionalField(
_RadiotapReversePadField(
_dbmField("dBm_AntNoise", -256)
),
lambda pkt: pkt.present and pkt.present.dBm_AntNoise),
# Lock_Quality
ConditionalField(
_RadiotapReversePadField(
LEShortField("Lock_Quality", 0),
),
lambda pkt: pkt.present and pkt.present.Lock_Quality),
# Antenna
ConditionalField(
_RadiotapReversePadField(
ByteField("Antenna", 0)
),
lambda pkt: pkt.present and pkt.present.Antenna),
# RX Flags
ConditionalField(
_RadiotapReversePadField(
FlagsField("RXFlags", None, -16, _rt_rxflags)
),
lambda pkt: pkt.present and pkt.present.RXFlags),
# TX Flags
ConditionalField(
_RadiotapReversePadField(
FlagsField("TXFlags", None, -16, _rt_txflags)
),
lambda pkt: pkt.present and pkt.present.TXFlags),
# ChannelPlus
ConditionalField(
_RadiotapReversePadField(
FlagsField("ChannelFlags2", None, -32, _rt_channelflags2)
),
lambda pkt: pkt.present and pkt.present.ChannelPlus),
ConditionalField(
LEShortField("ChannelFrequency", 0),
lambda pkt: pkt.present and pkt.present.ChannelPlus),
ConditionalField(
ByteField("ChannelNumber", 0),
lambda pkt: pkt.present and pkt.present.ChannelPlus),
# MCS
ConditionalField(
_RadiotapReversePadField(
FlagsField("knownMCS", None, -8, _rt_knownmcs)
),
lambda pkt: pkt.present and pkt.present.MCS),
ConditionalField(
BitField("Ness_LSB", 0, 1),
lambda pkt: pkt.present and pkt.present.MCS),
ConditionalField(
BitField("STBC_streams", 0, 2),
lambda pkt: pkt.present and pkt.present.MCS),
ConditionalField(
BitEnumField("FEC_type", 0, 1, {0: "BCC", 1: "LDPC"}),
lambda pkt: pkt.present and pkt.present.MCS),
ConditionalField(
BitEnumField("HT_format", 0, 1, {0: "mixed", 1: "greenfield"}),
lambda pkt: pkt.present and pkt.present.MCS),
ConditionalField(
BitEnumField("guard_interval", 0, 1, {0: "Long_GI", 1: "Short_GI"}), # noqa: E501
lambda pkt: pkt.present and pkt.present.MCS),
ConditionalField(
BitEnumField("MCS_bandwidth", 0, 2, _rt_bandwidth),
lambda pkt: pkt.present and pkt.present.MCS),
ConditionalField(
ByteField("MCS_index", 0),
lambda pkt: pkt.present and pkt.present.MCS),
# A_MPDU
ConditionalField(
_RadiotapReversePadField(
LEIntField("A_MPDU_ref", 0)
),
lambda pkt: pkt.present and pkt.present.A_MPDU),
ConditionalField(
FlagsField("A_MPDU_flags", None, -32, _rt_a_mpdu_flags),
lambda pkt: pkt.present and pkt.present.A_MPDU),
# VHT
ConditionalField(
_RadiotapReversePadField(
FlagsField("KnownVHT", None, -16, _rt_knownvht)
),
lambda pkt: pkt.present and pkt.present.VHT),
ConditionalField(
FlagsField("PresentVHT", None, -8, _rt_presentvht),
lambda pkt: pkt.present and pkt.present.VHT),
ConditionalField(
ByteEnumField("VHT_bandwidth", 0, _rt_vhtbandwidth),
lambda pkt: pkt.present and pkt.present.VHT),
ConditionalField(
StrFixedLenField("mcs_nss", 0, length=5),
lambda pkt: pkt.present and pkt.present.VHT),
ConditionalField(
ByteField("GroupID", 0),
lambda pkt: pkt.present and pkt.present.VHT),
ConditionalField(
ShortField("PartialAID", 0),
lambda pkt: pkt.present and pkt.present.VHT),
# timestamp
ConditionalField(
_RadiotapReversePadField(
LELongField("timestamp", 0)
),
lambda pkt: pkt.present and pkt.present.timestamp),
ConditionalField(
LEShortField("ts_accuracy", 0),
lambda pkt: pkt.present and pkt.present.timestamp),
ConditionalField(
ByteField("ts_position", 0),
lambda pkt: pkt.present and pkt.present.timestamp),
ConditionalField(
ByteField("ts_flags", 0),
lambda pkt: pkt.present and pkt.present.timestamp),
# HE - XXX not complete
ConditionalField(
_RadiotapReversePadField(
ShortField("he_data1", 0)
),
lambda pkt: pkt.present and pkt.present.HE),
ConditionalField(
ShortField("he_data2", 0),
lambda pkt: pkt.present and pkt.present.HE),
ConditionalField(
ShortField("he_data3", 0),
lambda pkt: pkt.present and pkt.present.HE),
ConditionalField(
ShortField("he_data4", 0),
lambda pkt: pkt.present and pkt.present.HE),
ConditionalField(
ShortField("he_data5", 0),
lambda pkt: pkt.present and pkt.present.HE),
ConditionalField(
ShortField("he_data6", 0),
lambda pkt: pkt.present and pkt.present.HE),
# HE_MU
ConditionalField(
_RadiotapReversePadField(
LEShortField("hemu_flags1", 0)
),
lambda pkt: pkt.present and pkt.present.HE_MU),
ConditionalField(
LEShortField("hemu_flags2", 0),
lambda pkt: pkt.present and pkt.present.HE_MU),
ConditionalField(
FieldListField("RU_channel1", [], ByteField,
count_from=lambda x: 4),
lambda pkt: pkt.present and pkt.present.HE_MU),
ConditionalField(
FieldListField("RU_channel2", [], ByteField,
count_from=lambda x: 4),
lambda pkt: pkt.present and pkt.present.HE_MU),
# HE_MU_other_user
ConditionalField(
_RadiotapReversePadField(
LEShortField("hemuou_per_user_1", 0x7fff)
),
lambda pkt: pkt.present and pkt.present.HE_MU_other_user),
ConditionalField(
LEShortField("hemuou_per_user_2", 0x003f),
lambda pkt: pkt.present and pkt.present.HE_MU_other_user),
ConditionalField(
ByteField("hemuou_per_user_position", 0),
lambda pkt: pkt.present and pkt.present.HE_MU_other_user),
ConditionalField(
FlagsField("hemuou_per_user_known", 0, -16,
_rt_hemuother_per_user_known),
lambda pkt: pkt.present and pkt.present.HE_MU_other_user),
# L_SIG
ConditionalField(
_RadiotapReversePadField(
FlagsField("lsig_data1", 0, -16, ["rate", "length"])
),
lambda pkt: pkt.present and pkt.present.L_SIG),
ConditionalField(
BitField("lsig_length", 0, 12),
lambda pkt: pkt.present and pkt.present.L_SIG),
ConditionalField(
BitField("lsig_rate", 0, 4),
lambda pkt: pkt.present and pkt.present.L_SIG),
# Remaining
StrLenField('notdecoded', "",
length_from=lambda pkt: max(
pkt.len - pkt._tmp_dissect_pos, 0
))
]
def guess_payload_class(self, payload):
if self.present and self.present.Flags and self.Flags.FCS:
return Dot11FCS
return Dot11
def post_build(self, p, pay):
if self.len is None:
p = p[:2] + struct.pack("!H", len(p))[::-1] + p[4:]
return p + pay
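# Sketch: with len left at None, post_build back-patches bytes 2-3 with the
# little-endian header length, so a bare header builds to
#   raw(RadioTap()) == b"\x00\x00\x08\x00\x00\x00\x00\x00"   # len == 8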
class Dot11(Packet):
name = "802.11"
fields_desc = [
BitField("subtype", 0, 4),
BitEnumField("type", 0, 2, ["Management", "Control", "Data",
"Reserved"]),
BitField("proto", 0, 2),
FlagsField("FCfield", 0, 8, ["to-DS", "from-DS", "MF", "retry",
"pw-mgt", "MD", "protected", "order"]),
ShortField("ID", 0),
MACField("addr1", ETHER_ANY),
ConditionalField(
MACField("addr2", ETHER_ANY),
lambda pkt: (pkt.type != 1 or
pkt.subtype in [0x8, 0x9, 0xa, 0xb, 0xe, 0xf]),
),
ConditionalField(
MACField("addr3", ETHER_ANY),
lambda pkt: pkt.type in [0, 2],
),
ConditionalField(LEShortField("SC", 0), lambda pkt: pkt.type != 1),
ConditionalField(
MACField("addr4", ETHER_ANY),
lambda pkt: (pkt.type == 2 and
pkt.FCfield & 3 == 3), # from-DS+to-DS
)
]
def mysummary(self):
# Supports both Dot11 and Dot11FCS
return self.sprintf("802.11 %%%s.type%% %%%s.subtype%% %%%s.addr2%% > %%%s.addr1%%" % ((self.__class__.__name__,) * 4)) # noqa: E501
def guess_payload_class(self, payload):
if self.type == 0x02 and (0x08 <= self.subtype <= 0xF and self.subtype != 0xD): # noqa: E501
return Dot11QoS
elif self.FCfield.protected:
# When a frame is handled by encryption, the Protected Frame bit
# (previously called WEP bit) is set to 1, and the Frame Body
# begins with the appropriate cryptographic header.
return Dot11Encrypted
else:
return Packet.guess_payload_class(self, payload)
def answers(self, other):
if isinstance(other, Dot11):
if self.type == 0: # management
if self.addr1.lower() != other.addr2.lower(): # check resp DA w/ req SA # noqa: E501
return 0
if (other.subtype, self.subtype) in [(0, 1), (2, 3), (4, 5)]:
return 1
if self.subtype == other.subtype == 11: # auth
return self.payload.answers(other.payload)
elif self.type == 1: # control
return 0
elif self.type == 2: # data
return self.payload.answers(other.payload)
elif self.type == 3: # reserved
return 0
return 0
def unwep(self, key=None, warn=1):
if self.FCfield & 0x40 == 0:
if warn:
warning("No WEP to remove")
return
if isinstance(self.payload.payload, NoPayload):
if key or conf.wepkey:
self.payload.decrypt(key)
if isinstance(self.payload.payload, NoPayload):
if warn:
warning("Dot11 can't be decrypted. Check conf.wepkey.")
return
self.FCfield &= ~0x40
self.payload = self.payload.payload
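# Sketch (key and capture file are assumptions): stripping WEP in place:
#   conf.wepkey = "mykey"
#   pkt = rdpcap("wep.pcap")[0]
#   pkt[Dot11].unwep()  # decrypts via Dot11WEP.decrypt, clears bit 0x40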
class Dot11FCS(Dot11):
name = "802.11-FCS"
match_subclass = True
fields_desc = Dot11.fields_desc + [FCSField("fcs", None, fmt="<I")]
def compute_fcs(self, s):
return struct.pack("!I", crc32(s) & 0xffffffff)[::-1]
def post_build(self, p, pay):
p += pay
if self.fcs is None:
p = p[:-4] + self.compute_fcs(p[:-4])
return p
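# Sketch: with fcs left at None, the trailer is CRC-32 over the frame body in
# little-endian order, i.e. roughly
#   raw(f)[-4:] == struct.pack("<I", crc32(raw(f)[:-4]) & 0xffffffff)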
class Dot11QoS(Packet):
name = "802.11 QoS"
fields_desc = [BitField("Reserved", None, 1),
BitField("Ack_Policy", None, 2),
BitField("EOSP", None, 1),
BitField("TID", None, 4),
ByteField("TXOP", None)]
def guess_payload_class(self, payload):
if isinstance(self.underlayer, Dot11):
if self.underlayer.FCfield.protected:
return Dot11Encrypted
return Packet.guess_payload_class(self, payload)
capability_list = ["res8", "res9", "short-slot", "res11",
"res12", "DSSS-OFDM", "res14", "res15",
"ESS", "IBSS", "CFP", "CFP-req",
"privacy", "short-preamble", "PBCC", "agility"]
reason_code = {0: "reserved", 1: "unspec", 2: "auth-expired",
3: "deauth-ST-leaving",
4: "inactivity", 5: "AP-full", 6: "class2-from-nonauth",
7: "class3-from-nonass", 8: "disas-ST-leaving",
9: "ST-not-auth"}
status_code = {0: "success", 1: "failure", 10: "cannot-support-all-cap",
11: "inexist-asso", 12: "asso-denied", 13: "algo-unsupported",
14: "bad-seq-num", 15: "challenge-failure",
16: "timeout", 17: "AP-full", 18: "rate-unsupported"}
class _Dot11NetStats(Packet):
fields_desc = [LELongField("timestamp", 0),
LEShortField("beacon_interval", 0x0064),
FlagsField("cap", 0, 16, capability_list)]
def network_stats(self):
"""Return a dictionary containing a summary of the Dot11
elements fields
"""
summary = {}
crypto = set()
akmsuite_types = {
0x00: "Reserved",
0x01: "802.1X",
0x02: "PSK"
}
p = self.payload
while isinstance(p, Dot11Elt):
if p.ID == 0:
summary["ssid"] = plain_str(p.info)
elif p.ID == 3:
summary["channel"] = ord(p.info)
elif isinstance(p, Dot11EltCountry):
summary["country"] = plain_str(p.country_string[:2])
country_descriptor_types = {
b"I": "Indoor",
b"O": "Outdoor",
b"X": "Non-country",
b"\xff": "Ignored"
}
summary["country_desc_type"] = country_descriptor_types.get(
p.country_string[-1:]
)
elif isinstance(p, Dot11EltRates):
summary["rates"] = p.rates
elif isinstance(p, Dot11EltRSN):
if p.akm_suites:
auth = akmsuite_types.get(p.akm_suites[0].suite)
crypto.add("WPA2/%s" % auth)
else:
crypto.add("WPA2")
elif p.ID == 221:
if isinstance(p, Dot11EltMicrosoftWPA) or \
p.info.startswith(b'\x00P\xf2\x01\x01\x00'):
if p.akm_suites:
auth = akmsuite_types.get(p.akm_suites[0].suite)
crypto.add("WPA/%s" % auth)
else:
crypto.add("WPA")
p = p.payload
if not crypto:
if self.cap.privacy:
crypto.add("WEP")
else:
crypto.add("OPN")
summary["crypto"] = crypto
return summary
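# Sketch (capture file name is an assumption): summarising sniffed beacons:
#   for pkt in rdpcap("capture.pcap"):
#       if pkt.haslayer(Dot11Beacon):
#           print(pkt[Dot11Beacon].network_stats())
#   # e.g. {'ssid': 'demo', 'channel': 6, 'rates': [...], 'crypto': {'WPA2/PSK'}}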
class Dot11Beacon(_Dot11NetStats):
name = "802.11 Beacon"
_dot11_info_elts_ids = {
0: "SSID",
1: "Rates",
2: "FHset",
3: "DSset",
4: "CFset",
5: "TIM",
6: "IBSSset",
7: "Country",
10: "Request",
16: "challenge",
33: "PowerCapability",
36: "Channels",
42: "ERPinfo",
45: "HTCapabilities",
46: "QoSCapability",
47: "ERPinfo",
48: "RSNinfo",
50: "ESRates",
52: "PowerConstraint",
61: "HTinfo",
68: "reserved",
107: "Interworking",
127: "ExtendendCapatibilities",
191: "VHTCapabilities",
221: "vendor"
}
class Dot11Elt(Packet):
__slots__ = ["info"]
name = "802.11 Information Element"
fields_desc = [ByteEnumField("ID", 0, _dot11_info_elts_ids),
FieldLenField("len", None, "info", "B"),
StrLenField("info", "", length_from=lambda x: x.len,
max_length=255)]
show_indent = 0
def mysummary(self):
if self.ID == 0:
ssid = repr(self.info)
if ssid[:2] in ['b"', "b'"]:
ssid = ssid[1:]
return "SSID=%s" % ssid, [Dot11]
else:
return ""
registered_ies = {}
@classmethod
def register_variant(cls):
cls.registered_ies[cls.ID.default] = cls
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt:
_id = orb(_pkt[0])
if _id == 221:
oui_a = orb(_pkt[2])
oui_b = orb(_pkt[3])
oui_c = orb(_pkt[4])
if oui_a == 0x00 and oui_b == 0x50 and oui_c == 0xf2:
# MS OUI
type_ = orb(_pkt[5])
if type_ == 0x01:
# MS WPA IE
return Dot11EltMicrosoftWPA
else:
return Dot11EltVendorSpecific
else:
return Dot11EltVendorSpecific
else:
return cls.registered_ies.get(_id, cls)
return cls
def haslayer(self, cls):
if cls == "Dot11Elt":
if isinstance(self, Dot11Elt):
return True
elif issubtype(cls, Dot11Elt):
if isinstance(self, cls):
return True
return super(Dot11Elt, self).haslayer(cls)
def getlayer(self, cls, nb=1, _track=None, _subclass=True, **flt):
return super(Dot11Elt, self).getlayer(cls, nb=nb, _track=_track,
_subclass=True, **flt)
def pre_dissect(self, s):
# Backward compatibility: add info to all elements
# This allows to introduce new Dot11Elt classes without breaking
# previous code
if len(s) >= 3:
length = orb(s[1])
if length > 0 and length <= 255:
self.info = s[2:2 + length]
return s
def post_build(self, p, pay):
if self.len is None:
p = p[:1] + chb(len(p) - 2) + p[2:]
return p + pay
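# Sketch: dissection re-dispatches on the element ID while building auto-fills
# len, e.g.
#   Dot11Elt(b"\x30...")                      # ID 48 -> Dot11EltRSN
#   raw(Dot11Elt(ID=0, info=b"ab")) == b"\x00\x02ab"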
class RSNCipherSuite(Packet):
name = "Cipher suite"
fields_desc = [
X3BytesField("oui", 0x000fac),
ByteEnumField("cipher", 0x04, {
0x00: "Use group cipher suite",
0x01: "WEP-40",
0x02: "TKIP",
0x03: "Reserved",
0x04: "CCMP",
0x05: "WEP-104"
})
]
def extract_padding(self, s):
return "", s
class AKMSuite(Packet):
name = "AKM suite"
fields_desc = [
X3BytesField("oui", 0x000fac),
ByteEnumField("suite", 0x01, {
0x00: "Reserved",
0x01: "IEEE 802.1X / PMKSA caching",
0x02: "PSK"
})
]
def extract_padding(self, s):
return "", s
class PMKIDListPacket(Packet):
name = "PMKIDs"
fields_desc = [
LEFieldLenField("nb_pmkids", 0, count_of="pmk_id_list"),
FieldListField(
"pmkid_list",
None,
XStrFixedLenField("", "", length=16),
count_from=lambda pkt: pkt.nb_pmkids
)
]
def extract_padding(self, s):
return "", s
class Dot11EltRSN(Dot11Elt):
name = "802.11 RSN information"
fields_desc = [
ByteField("ID", 48),
ByteField("len", None),
LEShortField("version", 1),
PacketField("group_cipher_suite", RSNCipherSuite(), RSNCipherSuite),
LEFieldLenField(
"nb_pairwise_cipher_suites",
1,
count_of="pairwise_cipher_suites"
),
PacketListField(
"pairwise_cipher_suites",
[RSNCipherSuite()],
RSNCipherSuite,
count_from=lambda p: p.nb_pairwise_cipher_suites
),
LEFieldLenField(
"nb_akm_suites",
1,
count_of="akm_suites"
),
PacketListField(
"akm_suites",
[AKMSuite()],
AKMSuite,
count_from=lambda p: p.nb_akm_suites
),
BitField("mfp_capable", 0, 1),
BitField("mfp_required", 0, 1),
BitField("gtksa_replay_counter", 0, 2),
BitField("ptksa_replay_counter", 0, 2),
BitField("no_pairwise", 0, 1),
BitField("pre_auth", 0, 1),
BitField("reserved", 0, 8),
ConditionalField(
PacketField("pmkids", None, PMKIDListPacket),
lambda pkt: (
0 if pkt.len is None else
pkt.len - (12 + (pkt.nb_pairwise_cipher_suites * 4) +
(pkt.nb_akm_suites * 4)) >= 18)
)
]
class Dot11EltCountryConstraintTriplet(Packet):
name = "802.11 Country Constraint Triplet"
fields_desc = [
ByteField("first_channel_number", 1),
ByteField("num_channels", 24),
ByteField("mtp", 0)
]
def extract_padding(self, s):
return b"", s
class Dot11EltCountry(Dot11Elt):
name = "802.11 Country"
fields_desc = [
ByteField("ID", 7),
ByteField("len", None),
StrFixedLenField("country_string", b"\0\0\0", length=3),
PacketListField(
"descriptors",
[],
Dot11EltCountryConstraintTriplet,
length_from=lambda pkt: (
pkt.len - 3 - (pkt.len % 3)
)
),
ConditionalField(
ByteField("pad", 0),
lambda pkt: (pkt.len + 1) % 2
)
]
class Dot11EltMicrosoftWPA(Dot11Elt):
name = "802.11 Microsoft WPA"
fields_desc = [
ByteField("ID", 221),
ByteField("len", None),
X3BytesField("oui", 0x0050f2),
XByteField("type", 0x01),
LEShortField("version", 1),
PacketField("group_cipher_suite", RSNCipherSuite(), RSNCipherSuite),
LEFieldLenField(
"nb_pairwise_cipher_suites",
1,
count_of="pairwise_cipher_suites"
),
PacketListField(
"pairwise_cipher_suites",
[RSNCipherSuite()],
RSNCipherSuite,
count_from=lambda p: p.nb_pairwise_cipher_suites
),
LEFieldLenField(
"nb_akm_suites",
1,
count_of="akm_suites"
),
PacketListField(
"akm_suites",
[AKMSuite()],
AKMSuite,
count_from=lambda p: p.nb_akm_suites
)
]
class Dot11EltRates(Dot11Elt):
name = "802.11 Rates"
fields_desc = [
ByteField("ID", 1),
ByteField("len", None),
FieldListField(
"rates",
[],
XByteField("", 0),
count_from=lambda p: p.len
)
]
class Dot11EltVendorSpecific(Dot11Elt):
name = "802.11 Vendor Specific"
fields_desc = [
ByteField("ID", 221),
ByteField("len", None),
X3BytesField("oui", 0x000000),
StrLenField("info", "", length_from=lambda x: x.len - 3)
]
class Dot11ATIM(Packet):
name = "802.11 ATIM"
class Dot11Disas(Packet):
name = "802.11 Disassociation"
fields_desc = [LEShortEnumField("reason", 1, reason_code)]
class Dot11AssoReq(Packet):
name = "802.11 Association Request"
fields_desc = [FlagsField("cap", 0, 16, capability_list),
LEShortField("listen_interval", 0x00c8)]
class Dot11AssoResp(Packet):
name = "802.11 Association Response"
fields_desc = [FlagsField("cap", 0, 16, capability_list),
LEShortField("status", 0),
LEShortField("AID", 0)]
class Dot11ReassoReq(Packet):
name = "802.11 Reassociation Request"
fields_desc = [FlagsField("cap", 0, 16, capability_list),
LEShortField("listen_interval", 0x00c8),
MACField("current_AP", ETHER_ANY)]
class Dot11ReassoResp(Dot11AssoResp):
name = "802.11 Reassociation Response"
class Dot11ProbeReq(Packet):
name = "802.11 Probe Request"
class Dot11ProbeResp(_Dot11NetStats):
name = "802.11 Probe Response"
class Dot11Auth(Packet):
name = "802.11 Authentication"
fields_desc = [LEShortEnumField("algo", 0, ["open", "sharedkey"]),
LEShortField("seqnum", 0),
LEShortEnumField("status", 0, status_code)]
def answers(self, other):
if self.seqnum == other.seqnum + 1:
return 1
return 0
class Dot11Deauth(Packet):
name = "802.11 Deauthentication"
fields_desc = [LEShortEnumField("reason", 1, reason_code)]
class Dot11Encrypted(Packet):
name = "802.11 Encrypted (unknown algorithm)"
fields_desc = [StrField("data", None)]
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
# Extracted from
# https://github.com/wireshark/wireshark/blob/master/epan/dissectors/packet-ieee80211.c # noqa: E501
KEY_EXTIV = 0x20
EXTIV_LEN = 8
if _pkt and len(_pkt) >= 4:
if (orb(_pkt[3]) & KEY_EXTIV) and (len(_pkt) >= EXTIV_LEN):
if orb(_pkt[1]) == ((orb(_pkt[0]) | 0x20) & 0x7f): # IS_TKIP
return Dot11TKIP
elif orb(_pkt[2]) == 0: # IS_CCMP
return Dot11CCMP
else:
# Unknown encryption algorithm
return Dot11Encrypted
else:
return Dot11WEP
return conf.raw_layer
class Dot11WEP(Dot11Encrypted):
name = "802.11 WEP packet"
fields_desc = [StrFixedLenField("iv", b"\0\0\0", 3),
ByteField("keyid", 0),
StrField("wepdata", None, remain=4),
IntField("icv", None)]
def decrypt(self, key=None):
if key is None:
key = conf.wepkey
if key and conf.crypto_valid:
d = Cipher(
algorithms.ARC4(self.iv + key.encode("utf8")),
None,
default_backend(),
).decryptor()
self.add_payload(LLC(d.update(self.wepdata) + d.finalize()))
def post_dissect(self, s):
self.decrypt()
def build_payload(self):
if self.wepdata is None:
return Packet.build_payload(self)
return b""
@crypto_validator
def encrypt(self, p, pay, key=None):
if key is None:
key = conf.wepkey
if key:
if self.icv is None:
pay += struct.pack("<I", crc32(pay) & 0xffffffff)
icv = b""
else:
icv = p[4:8]
e = Cipher(
algorithms.ARC4(self.iv + key.encode("utf8")),
None,
default_backend(),
).encryptor()
return p[:4] + e.update(pay) + e.finalize() + icv
else:
warning("No WEP key set (conf.wepkey).. strange results expected..") # noqa: E501
return b""
def post_build(self, p, pay):
if self.wepdata is None:
p = self.encrypt(p, raw(pay))
return p
# Dot11TKIP & Dot11CCMP
# we can't dissect ICV / MIC here: they are encrypted
class Dot11TKIP(Dot11Encrypted):
name = "802.11 TKIP packet"
fields_desc = [
# iv - 4 bytes
ByteField("TSC1", 0),
ByteField("WEPSeed", 0),
ByteField("TSC0", 0),
BitField("key_id", 0, 2), #
BitField("ext_iv", 0, 1), # => LE = reversed order
BitField("res", 0, 5), #
# ext_iv - 4 bytes
ConditionalField(ByteField("TSC2", 0), lambda pkt: pkt.ext_iv),
ConditionalField(ByteField("TSC3", 0), lambda pkt: pkt.ext_iv),
ConditionalField(ByteField("TSC4", 0), lambda pkt: pkt.ext_iv),
ConditionalField(ByteField("TSC5", 0), lambda pkt: pkt.ext_iv),
# data
StrField("data", None),
]
class Dot11CCMP(Dot11Encrypted):
name = "802.11 TKIP packet"
fields_desc = [
# iv - 8 bytes
ByteField("PN0", 0),
ByteField("PN1", 0),
ByteField("res0", 0),
BitField("key_id", 0, 2), #
BitField("ext_iv", 0, 1), # => LE = reversed order
BitField("res1", 0, 5), #
ByteField("PN2", 0),
ByteField("PN3", 0),
ByteField("PN4", 0),
ByteField("PN5", 0),
# data
StrField("data", None),
]
class Dot11Ack(Packet):
name = "802.11 Ack packet"
bind_top_down(RadioTap, Dot11FCS, present=2, Flags=16)
bind_layers(PrismHeader, Dot11,)
bind_layers(Dot11, LLC, type=2)
bind_layers(Dot11QoS, LLC,)
bind_layers(Dot11, Dot11AssoReq, subtype=0, type=0)
bind_layers(Dot11, Dot11AssoResp, subtype=1, type=0)
bind_layers(Dot11, Dot11ReassoReq, subtype=2, type=0)
bind_layers(Dot11, Dot11ReassoResp, subtype=3, type=0)
bind_layers(Dot11, Dot11ProbeReq, subtype=4, type=0)
bind_layers(Dot11, Dot11ProbeResp, subtype=5, type=0)
bind_layers(Dot11, Dot11Beacon, subtype=8, type=0)
bind_layers(Dot11, Dot11ATIM, subtype=9, type=0)
bind_layers(Dot11, Dot11Disas, subtype=10, type=0)
bind_layers(Dot11, Dot11Auth, subtype=11, type=0)
bind_layers(Dot11, Dot11Deauth, subtype=12, type=0)
bind_layers(Dot11, Dot11Ack, subtype=13, type=1)
bind_layers(Dot11Beacon, Dot11Elt,)
bind_layers(Dot11AssoReq, Dot11Elt,)
bind_layers(Dot11AssoResp, Dot11Elt,)
bind_layers(Dot11ReassoReq, Dot11Elt,)
bind_layers(Dot11ReassoResp, Dot11Elt,)
bind_layers(Dot11ProbeReq, Dot11Elt,)
bind_layers(Dot11ProbeResp, Dot11Elt,)
bind_layers(Dot11Auth, Dot11Elt,)
bind_layers(Dot11Elt, Dot11Elt,)
bind_layers(Dot11TKIP, conf.raw_layer)
bind_layers(Dot11CCMP, conf.raw_layer)
conf.l2types.register(DLT_IEEE802_11, Dot11)
conf.l2types.register_num2layer(801, Dot11)
conf.l2types.register(DLT_PRISM_HEADER, PrismHeader)
conf.l2types.register_num2layer(802, PrismHeader)
conf.l2types.register(DLT_IEEE802_11_RADIO, RadioTap)
conf.l2types.register_num2layer(803, RadioTap)
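# Sketch (addresses, SSID and interface are made up): with the bindings above,
# a beacon can be forged layer by layer:
#   pkt = (RadioTap()
#          / Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff",
#                  addr2="00:11:22:33:44:55", addr3="00:11:22:33:44:55")
#          / Dot11Beacon(cap="ESS")
#          / Dot11Elt(ID="SSID", info=b"demo")
#          / Dot11EltRates(rates=[0x82, 0x84]))
#   sendp(pkt, iface="wlan0mon")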
class WiFi_am(AnsweringMachine):
"""Before using this, initialize "iffrom" and "ifto" interfaces:
iwconfig iffrom mode monitor
iwpriv orig_ifto hostapd 1
ifconfig ifto up
note: if ifto=wlan0ap then orig_ifto=wlan0
note: ifto and iffrom must be set on the same channel
ex:
ifconfig eth1 up
iwconfig eth1 mode monitor
iwconfig eth1 channel 11
iwpriv wlan0 hostapd 1
ifconfig wlan0ap up
iwconfig wlan0 channel 11
iwconfig wlan0 essid dontexist
iwconfig wlan0 mode managed
"""
function_name = "airpwn"
filter = None
def parse_options(self, iffrom=conf.iface, ifto=conf.iface, replace="",
pattern="", ignorepattern=""):
self.iffrom = iffrom
self.ifto = ifto
self.ptrn = re.compile(pattern.encode())
self.iptrn = re.compile(ignorepattern.encode())
self.replace = replace
def is_request(self, pkt):
if not isinstance(pkt, Dot11):
return 0
if not pkt.FCfield & 1:
return 0
if not pkt.haslayer(TCP):
return 0
tcp = pkt.getlayer(TCP)
pay = raw(tcp.payload)
if not self.ptrn.match(pay):
return 0
if self.iptrn.pattern and self.iptrn.match(pay):
return 0
return True
def make_reply(self, p):
ip = p.getlayer(IP)
tcp = p.getlayer(TCP)
pay = raw(tcp.payload)
del(p.payload.payload.payload)
p.FCfield = "from-DS"
p.addr1, p.addr2 = p.addr2, p.addr1
p /= IP(src=ip.dst, dst=ip.src)
p /= TCP(sport=tcp.dport, dport=tcp.sport,
seq=tcp.ack, ack=tcp.seq + len(pay),
flags="PA")
q = p.copy()
p /= self.replace
q.ID += 1
q.getlayer(TCP).flags = "RA"
q.getlayer(TCP).seq += len(self.replace)
return [p, q]
def print_reply(self, query, *reply):
p = reply[0][0]
print(p.sprintf("Sent %IP.src%:%IP.sport% > %IP.dst%:%TCP.dport%"))
def send_reply(self, reply):
sendp(reply, iface=self.ifto, **self.optsend)
def sniff(self):
sniff(iface=self.iffrom, **self.optsniff)
conf.stats_dot11_protocols += [Dot11WEP, Dot11Beacon, ]
class Dot11PacketList(PacketList):
def __init__(self, res=None, name="Dot11List", stats=None):
if stats is None:
stats = conf.stats_dot11_protocols
PacketList.__init__(self, res, name, stats)
def toEthernet(self):
data = [x[Dot11] for x in self.res if Dot11 in x and x.type == 2]
r2 = []
for p in data:
q = p.copy()
q.unwep()
r2.append(Ether() / q.payload.payload.payload) # Dot11/LLC/SNAP/IP
return PacketList(r2, name="Ether from %s" % self.listname)
| 1 | 15,260 | I wonder if the version should be part of the deprecation API. It might help our future selves when debugging issues =) | secdev-scapy | py |
@@ -102,7 +102,7 @@ class ExternalDriverSupplier implements Supplier<WebDriver> {
Optional<Class<? extends Supplier<WebDriver>>> supplierClass = getDelegateClass();
if (supplierClass.isPresent()) {
Class<? extends Supplier<WebDriver>> clazz = supplierClass.get();
- logger.info("Using delegate supplier: " + clazz.getName());
+ logger.finest("Using delegate supplier: " + clazz.getName());
try {
@SuppressWarnings("unchecked")
Constructor<Supplier<WebDriver>> ctor = | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.testing.drivers;
import static java.util.concurrent.TimeUnit.SECONDS;
import com.google.common.base.Suppliers;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.ImmutableCapabilities;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.net.UrlChecker;
import org.openqa.selenium.remote.LocalFileDetector;
import org.openqa.selenium.remote.RemoteWebDriver;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.logging.Logger;
/**
* Supports providing WebDriver instances from an external source using the following system
* properties:
* <dl>
* <dt>selenium.external.serverUrl</dt>
* <dd>Defines the fully qualified URL of an external WebDriver server to send commands to.
* This server <i>must</i> be compliant with the
* <a href="https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol">JSON wire protocol</a>.
* If only this property is provided, then this supplier will provide a new
* {@link RemoteWebDriver} instance pointed at the designated server. Otherwise, if a
* custom supplier is also defined (see below), this supplier will wait for the server to
* be accepting commands before delegating to the designated class for the actual client
* creation.
* </dd>
* <dt>selenium.external.supplierClass</dt>
* <dd>Specifies the fully qualified name of another class on the classpath. This class must
* implement {@code Supplier<WebDriver>} and have a public constructor that accepts two
* {@link Capabilities} objects as arguments (for the desired and required capabilities,
* respectively).
* </dd>
* </dl>
*/
class ExternalDriverSupplier implements Supplier<WebDriver> {
private static final Logger logger = Logger.getLogger(ExternalDriverSupplier.class.getName());
private static final String DELEGATE_SUPPLIER_CLASS_PROPERTY = "selenium.external.supplierClass";
private static final String EXTERNAL_SERVER_URL_PROPERTY = "selenium.external.serverUrl";
private final Capabilities desiredCapabilities;
ExternalDriverSupplier(Capabilities desiredCapabilities) {
this.desiredCapabilities = new ImmutableCapabilities(desiredCapabilities);
}
@Override
public WebDriver get() {
Optional<Supplier<WebDriver>> delegate = createDelegate(desiredCapabilities);
delegate = createForExternalServer(desiredCapabilities, delegate);
return delegate.orElse(Suppliers.ofInstance(null)).get();
}
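  // Sketch (values are illustrative): the two properties read below are
  // normally passed to the test JVM, e.g.
  //   -Dselenium.external.serverUrl=http://localhost:4444/wd/hub
  //   -Dselenium.external.supplierClass=com.example.MyDriverSupplier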
private static Optional<Supplier<WebDriver>> createForExternalServer(
Capabilities desiredCapabilities,
Optional<Supplier<WebDriver>> delegate) {
String externalUrl = System.getProperty(EXTERNAL_SERVER_URL_PROPERTY);
if (externalUrl != null) {
logger.info("Using external WebDriver server: " + externalUrl);
URL url;
try {
url = new URL(externalUrl);
} catch (MalformedURLException e) {
throw new RuntimeException("Invalid server URL: " + externalUrl, e);
}
Supplier<WebDriver> defaultSupplier = new DefaultRemoteSupplier(url, desiredCapabilities);
Supplier<WebDriver> supplier = new ExternalServerDriverSupplier(
url, delegate.orElse(defaultSupplier));
return Optional.of(supplier);
}
return delegate;
}
private static Optional<Supplier<WebDriver>> createDelegate(Capabilities desiredCapabilities) {
Optional<Class<? extends Supplier<WebDriver>>> supplierClass = getDelegateClass();
if (supplierClass.isPresent()) {
Class<? extends Supplier<WebDriver>> clazz = supplierClass.get();
logger.info("Using delegate supplier: " + clazz.getName());
try {
@SuppressWarnings("unchecked")
Constructor<Supplier<WebDriver>> ctor =
(Constructor<Supplier<WebDriver>>) clazz.getConstructor(Capabilities.class);
return Optional.of(ctor.newInstance(desiredCapabilities));
} catch (InvocationTargetException e) {
throw new RuntimeException(e.getTargetException());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
return Optional.empty();
}
@SuppressWarnings("unchecked")
private static Optional<Class<? extends Supplier<WebDriver>>> getDelegateClass() {
String delegateClassName = System.getProperty(DELEGATE_SUPPLIER_CLASS_PROPERTY);
if (delegateClassName != null) {
try {
logger.info("Loading custom supplier: " + delegateClassName);
Class<? extends Supplier<WebDriver>> clazz =
(Class<? extends Supplier<WebDriver>>) Class.forName(delegateClassName);
return Optional.of(clazz);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
return Optional.empty();
}
/**
* Waits for an external WebDriver server to be ready before delegating to another supplier
* for driver creation.
*/
private static class ExternalServerDriverSupplier implements Supplier<WebDriver> {
private final URL serverUrl;
private final Supplier<WebDriver> delegateSupplier;
private ExternalServerDriverSupplier(
URL serverUrl, Supplier<WebDriver> delegateSupplier) {
this.serverUrl = serverUrl;
this.delegateSupplier = delegateSupplier;
}
@Override
public WebDriver get() {
try {
logger.info("Waiting for server to be ready at " + serverUrl);
new UrlChecker().waitUntilAvailable(60, SECONDS, new URL(serverUrl + "/status"));
logger.info("Server is ready");
} catch (UrlChecker.TimeoutException e) {
throw new RuntimeException("The external server is not accepting commands", e);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
return delegateSupplier.get();
}
}
/**
* Creates basic {@link RemoteWebDriver} instances.
*/
private static class DefaultRemoteSupplier implements Supplier<WebDriver> {
private final URL url;
private final Capabilities desiredCapabilities;
private DefaultRemoteSupplier(URL url, Capabilities desiredCapabilities) {
this.url = url;
this.desiredCapabilities = desiredCapabilities;
}
@Override
public WebDriver get() {
RemoteWebDriver driver = new RemoteWebDriver(url, desiredCapabilities);
driver.setFileDetector(new LocalFileDetector());
return driver;
}
}
}
| 1 | 16,449 | We chose `info` in the test code for obvious reasons. Changing to `finest` makes debugging harder and noisier. | SeleniumHQ-selenium | java |
@@ -183,10 +183,11 @@ namespace Microsoft.Sarif.Viewer
HelpLink = rule?.HelpUri?.ToString()
};
- IEnumerable<IEnumerable<AnnotatedCodeLocation>> stackLocations = CreateAnnotationsFromStacks(result.Stacks);
+ IEnumerable<IEnumerable<AnnotatedCodeLocation>> stackLocations = CreateAnnotatedCodeLocationsFromStacks(result.Stacks);
+ IEnumerable<IEnumerable<AnnotatedCodeLocation>> codeFlowLocations = CreateAnnotatedCodeLocationsFromCodeFlows(result.CodeFlows);
CreateAnnotatedCodeLocationCollections(stackLocations, AnnotatedCodeLocationKind.Stack, sarifError);
- CreateAnnotatedCodeLocationCollections(result.CodeFlows, AnnotatedCodeLocationKind.CodeFlow, sarifError);
+ CreateAnnotatedCodeLocationCollections(codeFlowLocations, AnnotatedCodeLocationKind.CodeFlow, sarifError);
CaptureAnnotatedCodeLocations(result.RelatedLocations, AnnotatedCodeLocationKind.Stack, sarifError);
if (region != null) | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using Microsoft.CodeAnalysis.Sarif;
using Microsoft.CodeAnalysis.Sarif.Driver;
using Microsoft.CodeAnalysis.Sarif.Driver.Sdk;
using Microsoft.CodeAnalysis.Sarif.Readers;
using Microsoft.CodeAnalysis.Sarif.Writers;
using Newtonsoft.Json;
namespace Microsoft.Sarif.Viewer
{
public class ErrorListService
{
public static readonly ErrorListService Instance = new ErrorListService();
public static void ProcessLogFile(string filePath, ToolFormat toolFormat = ToolFormat.None)
{
SarifLog log;
JsonSerializerSettings settings = new JsonSerializerSettings()
{
ContractResolver = SarifContractResolver.Instance,
};
string logText;
if (toolFormat == ToolFormat.None)
{
logText = File.ReadAllText(filePath);
}
else if (toolFormat == ToolFormat.PREfast)
{
logText = ToolFormatConverter.ConvertPREfastToStandardFormat(filePath);
}
else
{
// We have conversion to do
var converter = new ToolFormatConverter();
var sb = new StringBuilder();
using (var input = new MemoryStream(File.ReadAllBytes(filePath)))
{
var outputTextWriter = new StringWriter(sb);
var outputJson = new JsonTextWriter(outputTextWriter);
var output = new ResultLogJsonWriter(outputJson);
input.Seek(0, SeekOrigin.Begin);
converter.ConvertToStandardFormat(toolFormat, input, output);
// This is serving as a flush mechanism
output.Dispose();
logText = sb.ToString();
}
}
log = JsonConvert.DeserializeObject<SarifLog>(logText, settings);
ProcessSarifLog(log);
}
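// Sketch (path is an assumption): the usual entry point from the extension is
//   ErrorListService.ProcessLogFile(@"C:\logs\scan.sarif");
// which deserializes the SARIF log and pushes each run to the error list.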
private static void ProcessSarifLog(SarifLog sarifLog)
{
foreach (Run run in sarifLog.Runs)
{
Instance.WriteRunToErrorList(run);
}
SarifTableDataSource.Instance.BringToFront();
}
private ErrorListService()
{
this.documentToLineIndexMap = new Dictionary<string, NewLineIndex>();
}
private Dictionary<string, NewLineIndex> documentToLineIndexMap;
private IRule GetRule(Run runLog, string ruleId)
{
if (runLog.Rules == null)
{
return null;
}
foreach (Rule rule in runLog.Rules.Values)
{
if (rule.Id == ruleId) { return rule; }
}
throw new InvalidOperationException();
}
private void WriteRunToErrorList(Run runLog)
{
List<SarifError> sarifErrors = new List<SarifError>();
// Prefer optional fullName, fall back to required Name property
string toolName = runLog.Tool.FullName ?? runLog.Tool.Name;
foreach (Result result in runLog.Results)
{
string category, document;
Region region;
category = null;
if (result.Properties != null)
{
result.Properties.TryGetValue("category", out category);
}
if (result.Locations != null)
{
foreach (Location location in result?.Locations)
{
region = null;
PhysicalLocation physicalLocation = null;
if (location.ResultFile != null)
{
physicalLocation = location.ResultFile;
document = physicalLocation.Uri.LocalPath;
region = physicalLocation.Region;
}
else if (location.AnalysisTarget != null)
{
physicalLocation = location.AnalysisTarget;
document = physicalLocation.Uri.LocalPath;
region = physicalLocation.Region;
}
else
{
document = location.FullyQualifiedLogicalName;
}
AddResult(runLog, sarifErrors, toolName, result, category, document, region);
}
}
else
{
AddResult(runLog, sarifErrors, toolName, result, category, document: @"d:\repros\test.txt", region: null);
}
CodeAnalysisResultManager.Instance.SarifErrors = sarifErrors;
SarifTableDataSource.Instance.AddErrors(sarifErrors);
}
}
private void AddResult(Run runLog, List<SarifError> sarifErrors, string toolName, Result result, string category, string document, Region region)
{
IRule rule;
string shortMessage, fullMessage;
rule = GetRule(runLog, result.RuleId);
shortMessage = result.GetMessageText(rule, concise: true);
fullMessage = result.GetMessageText(rule, concise: false);
if (shortMessage == fullMessage)
{
fullMessage = null;
}
SarifError sarifError = new SarifError(document)
{
Region = region,
RuleId = result.RuleId,
RuleName = rule?.Name,
Kind = result.Kind,
Category = category,
ShortMessage = shortMessage,
FullMessage = fullMessage,
Tool = toolName,
HelpLink = rule?.HelpUri?.ToString()
};
IEnumerable<IEnumerable<AnnotatedCodeLocation>> stackLocations = CreateAnnotationsFromStacks(result.Stacks);
CreateAnnotatedCodeLocationCollections(stackLocations, AnnotatedCodeLocationKind.Stack, sarifError);
CreateAnnotatedCodeLocationCollections(result.CodeFlows, AnnotatedCodeLocationKind.CodeFlow, sarifError);
CaptureAnnotatedCodeLocations(result.RelatedLocations, AnnotatedCodeLocationKind.Stack, sarifError);
if (region != null)
{
sarifError.ColumnNumber = region.StartColumn - 1;
sarifError.LineNumber = region.StartLine - 1;
}
sarifErrors.Add(sarifError);
}
private IEnumerable<IEnumerable<AnnotatedCodeLocation>> CreateAnnotationsFromStacks(IEnumerable<Stack> stacks)
{
List<List<AnnotatedCodeLocation>> codeLocationCollections = new List<List<AnnotatedCodeLocation>>();
foreach (Stack stack in stacks)
{
if (stack.Frames == null) { continue; }
var codeLocations = new List<AnnotatedCodeLocation>();
foreach (StackFrame stackFrame in stack.Frames)
{
codeLocations.Add(new AnnotatedCodeLocation
{
Message = stackFrame.ToString(),
PhysicalLocation = new PhysicalLocation
{
Uri = stackFrame.Uri,
Region = new Region
{
StartLine = stackFrame.Line,
StartColumn = stackFrame.Column
}
}
});
}
codeLocationCollections.Add(codeLocations);
}
return codeLocationCollections;
}
private static void CreateAnnotatedCodeLocationCollections(
IEnumerable<IEnumerable<AnnotatedCodeLocation>> codeLocationCollections,
AnnotatedCodeLocationKind annotatedCodeLocationKind,
SarifError sarifError)
{
if (codeLocationCollections == null)
{
return;
}
foreach (IEnumerable<AnnotatedCodeLocation> codeLocations in codeLocationCollections)
{
CaptureAnnotatedCodeLocations(codeLocations, annotatedCodeLocationKind, sarifError);
}
}
private static void CaptureAnnotatedCodeLocations(IEnumerable<AnnotatedCodeLocation> codeLocations, AnnotatedCodeLocationKind annotatedCodeLocationKind, SarifError sarifError)
{
if (codeLocations == null)
{
return;
}
int annotationCollectionCount = 0;
foreach (AnnotatedCodeLocation codeLocation in codeLocations)
{
PhysicalLocation plc = codeLocation.PhysicalLocation;
sarifError.Annotations.Add(new AnnotatedCodeLocationModel()
{
Index = annotationCollectionCount,
Kind = annotatedCodeLocationKind,
Region = plc.Region,
FilePath = plc.Uri.LocalPath,
Message = codeLocation.Message
});
annotationCollectionCount++;
}
}
private static bool IsError(ResultKind kind)
{
return
kind == ResultKind.ConfigurationError ||
kind == ResultKind.Error ||
kind == ResultKind.InternalError;
}
}
} | 1 | 10,595 | This is just a rename... | microsoft-sarif-sdk | .cs |
@@ -33,7 +33,10 @@ func GetLengthLimitedID(fixedPrefix, suffix string, maxLength int) string {
// start with the character that we use to denote a shortened string, which could
// result in a clash. Hash the value and truncate...
hasher := sha256.New()
- hasher.Write([]byte(suffix))
+ _, err := hasher.Write([]byte(suffix))
+ if err != nil {
+ return ""
+ }
hash := base64.RawURLEncoding.EncodeToString(hasher.Sum(nil))
charsLeftForHash := maxLength - 1 - prefixLen
return fixedPrefix + shortenedPrefix + hash[0:charsLeftForHash] | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hashutils
import (
"crypto/sha256"
"encoding/base64"
)
const shortenedPrefix = "_"
// GetLengthLimitedID returns an ID that consists of the given prefix and, either the given suffix,
// or, if that would exceed the length limit, a cryptographic hash of the suffix, truncated to the
// required length.
func GetLengthLimitedID(fixedPrefix, suffix string, maxLength int) string {
prefixLen := len(fixedPrefix)
suffixLen := len(suffix)
totalLen := prefixLen + suffixLen
if totalLen > maxLength || (totalLen == maxLength && suffix[0:1] == shortenedPrefix) {
// Either it's just too long, or it's exactly the right length but it happens to
// start with the character that we use to denote a shortened string, which could
// result in a clash. Hash the value and truncate...
hasher := sha256.New()
hasher.Write([]byte(suffix))
hash := base64.RawURLEncoding.EncodeToString(hasher.Sum(nil))
charsLeftForHash := maxLength - 1 - prefixLen
return fixedPrefix + shortenedPrefix + hash[0:charsLeftForHash]
}
// No need to shorten.
return fixedPrefix + suffix
}
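// Sketch (values are illustrative): with maxLength 20 and prefix "felix-",
// 13 characters remain for the hash, so
//   GetLengthLimitedID("felix-", "a-very-long-chain-name", 20)
// yields "felix-_" plus the first 13 base64url chars of sha256(suffix).
// Note hash.Hash.Write is documented to never return an error.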
| 1 | 17,190 | Should probably panic here. I think hashers are contracted not to return errors (and returning "" doesn't handle the error) | projectcalico-felix | go |
@@ -13,7 +13,7 @@ import (
// Address is the interface that defines methods to manage Filecoin addresses and wallets.
type Address interface {
Addrs() Addrs
- Import(ctx context.Context, f files.Directory) ([]address.Address, error)
+ Import(ctx context.Context, f files.File) ([]address.Address, error)
Export(ctx context.Context, addrs []address.Address) ([]*types.KeyInfo, error)
}
| 1 | package api
import (
"context"
"gx/ipfs/QmQmhotPUzVrMEWNK3x1R5jQ5ZHWyL7tVUrmRPjrBrvyCb/go-ipfs-files"
"gx/ipfs/QmTu65MVbemtUxJEWgsTtzv9Zv9P8rvmqNA4eG9TrTRGYc/go-libp2p-peer"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/types"
)
// Address is the interface that defines methods to manage Filecoin addresses and wallets.
type Address interface {
Addrs() Addrs
Import(ctx context.Context, f files.Directory) ([]address.Address, error)
Export(ctx context.Context, addrs []address.Address) ([]*types.KeyInfo, error)
}
// Addrs is the interface that defines method to interact with addresses.
type Addrs interface {
New(ctx context.Context) (address.Address, error)
Ls(ctx context.Context) ([]address.Address, error)
Lookup(ctx context.Context, addr address.Address) (peer.ID, error)
}
| 1 | 17,220 | Aside: this is a confusing name for an interface that contains multiple addresses. | filecoin-project-venus | go |
@@ -47,10 +47,8 @@ module Bolt
state = node_result['state']
result = node_result['result']
- result_output = Bolt::Node::ResultOutput.new
- result_output.stdout << result.to_json
if state == 'finished'
- Bolt::Node::Success.new(result.to_json, result_output)
+ exit_code = 0
else
# Try to extract the exit_code from _error
begin | 1 | require 'base64'
require 'json'
require 'orchestrator_client'
module Bolt
class Orch < Node
CONF_FILE = File.expand_path('~/.puppetlabs/client-tools/orchestrator.conf')
BOLT_MOCK_FILE = 'bolt/tasks/init'.freeze
def connect; end
def disconnect; end
def make_client
OrchestratorClient.new({}, true)
end
def client
@client ||= make_client
end
# This avoids a refactor to pass more task data around
def task_name_from_path(path)
path = File.absolute_path(path)
parts = path.split(File::Separator)
if parts.length < 3 || parts[-2] != 'tasks'
raise ArgumentError, "Task path was not inside a module."
end
mod = parts[-3]
name = File.basename(path).split('.')[0]
if name == 'init'
mod
else
"#{mod}::#{name}"
end
end
def _run_task(task, _input_method, arguments)
body = { task: task_name_from_path(task),
params: arguments,
scope: {
nodes: [@host]
} }
# Should we handle errors here or let them propagate?
results = client.run_task(body)
node_result = results[0]
state = node_result['state']
result = node_result['result']
result_output = Bolt::Node::ResultOutput.new
result_output.stdout << result.to_json
if state == 'finished'
Bolt::Node::Success.new(result.to_json, result_output)
else
# Try to extract the exit_code from _error
begin
exit_code = result['_error']['details']['exit_code'] || 'unknown'
rescue NoMethodError
exit_code = 'unknown'
end
Bolt::Node::Failure.new(exit_code, result_output)
end
end
# run_task generates a result that makes sense for a generic task which
# needs to be unwrapped to extract stdout/stderr/exitcode.
#
def unwrap_bolt_result(result)
task_result = JSON.parse(result.output.stdout.string)
if task_result['exit_code'].nil?
# something went wrong return the failure
return result
end
# Otherwise create a new result with the captured output
result_output = Bolt::Node::ResultOutput.new
result_output.stdout << task_result['stdout']
result_output.stderr << task_result['stderr']
if (task_result['exit_code']).zero?
Bolt::Node::Success.new(task_result['stdout'], result_output)
else
Bolt::Node::Failure.new(task_result['exit_code'], result_output)
end
end
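# Sketch (assumed shape of the wrapper task's output):
#   {"exit_code" => 0, "stdout" => "hi\n", "stderr" => ""}
# unwrap_bolt_result re-exposes those streams as a Success, or as a Failure
# carrying the remote exit code when it is non-zero.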
def _run_command(command, options = {})
result = _run_task(BOLT_MOCK_FILE,
'stdin',
action: 'command',
command: command,
options: options)
unwrap_bolt_result(result)
end
def _upload(source, destination)
content = File.open(source, &:read)
content = Base64.encode64(content)
mode = File.stat(source).mode
params = {
action: 'upload',
path: destination,
content: content,
mode: mode
}
_run_task(BOLT_MOCK_FILE, 'stdin', params)
end
def _run_script(script, arguments)
content = File.open(script, &:read)
content = Base64.encode64(content)
params = {
action: 'script',
content: content,
arguments: arguments
}
unwrap_bolt_result(_run_task(BOLT_MOCK_FILE, 'stdin', params))
end
end
end
| 1 | 7,003 | We expect to use Bolt::CommandResult for scripts as well? | puppetlabs-bolt | rb |
@@ -386,7 +386,9 @@ module RSpec
user_metadata = Metadata.build_hash_from(args)
@metadata = Metadata::ExampleGroupHash.create(
- superclass_metadata, user_metadata, description, *args, &example_group_block
+ superclass_metadata, user_metadata,
+ superclass.method(:next_runnable_index_for),
+ description, *args, &example_group_block
)
hooks.register_globals(self, RSpec.configuration.hooks) | 1 | RSpec::Support.require_rspec_support 'recursive_const_methods'
module RSpec
module Core
# ExampleGroup and {Example} are the main structural elements of
# rspec-core. Consider this example:
#
# describe Thing do
# it "does something" do
# end
# end
#
# The object returned by `describe Thing` is a subclass of ExampleGroup.
# The object returned by `it "does something"` is an instance of Example,
# which serves as a wrapper for an instance of the ExampleGroup in which it
# is declared.
#
# Example group bodies (e.g. `describe` or `context` blocks) are evaluated
# in the context of a new subclass of ExampleGroup. Individual examples are
# evaluated in the context of an instance of the specific ExampleGroup
# subclass to which they belong.
#
# Besides the class methods defined here, there are other interesting macros
# defined in {Hooks}, {MemoizedHelpers::ClassMethods} and
# {SharedExampleGroup}. There are additional instance methods available to
# your examples defined in {MemoizedHelpers} and {Pending}.
class ExampleGroup
extend Hooks
include MemoizedHelpers
extend MemoizedHelpers::ClassMethods
include Pending
extend SharedExampleGroup
# @private
def self.idempotently_define_singleton_method(name, &definition)
(class << self; self; end).module_exec do
remove_method(name) if method_defined?(name)
define_method(name, &definition)
end
end
# @!group Metadata
# The [Metadata](Metadata) object associated with this group.
# @see Metadata
def self.metadata
@metadata ||= nil
end
# Temporarily replace the provided metadata.
# Intended primarily to allow an example group's singleton class
# to return the metadata of the example that it exists for. This
# is necessary for shared example group inclusion to work properly
# with singleton example groups.
# @private
def self.with_replaced_metadata(meta)
orig_metadata = metadata
@metadata = meta
yield
ensure
@metadata = orig_metadata
end
# @private
# @return [Metadata] belonging to the parent of a nested {ExampleGroup}
def self.superclass_metadata
@superclass_metadata ||= superclass.respond_to?(:metadata) ? superclass.metadata : nil
end
# @private
def self.delegate_to_metadata(*names)
names.each do |name|
idempotently_define_singleton_method(name) { metadata.fetch(name) }
end
end
delegate_to_metadata :described_class, :file_path, :location
# @return [String] the current example group description
def self.description
description = metadata[:description]
RSpec.configuration.format_docstrings_block.call(description)
end
# Returns the class or module passed to the `describe` method (or alias).
# Returns nil if the subject is not a class or module.
# @example
# describe Thing do
# it "does something" do
# described_class == Thing
# end
# end
#
def described_class
self.class.described_class
end
# @!endgroup
# @!group Defining Examples
# @private
# @macro [attach] define_example_method
# @!scope class
# @overload $1
# @overload $1(&example_implementation)
# @param example_implementation [Block] The implementation of the example.
# @overload $1(doc_string, *metadata_keys, metadata={})
# @param doc_string [String] The example's doc string.
# @param metadata [Hash] Metadata for the example.
# @param metadata_keys [Array<Symbol>] Metadata tags for the example.
# Will be transformed into hash entries with `true` values.
# @overload $1(doc_string, *metadata_keys, metadata={}, &example_implementation)
# @param doc_string [String] The example's doc string.
# @param metadata [Hash] Metadata for the example.
# @param metadata_keys [Array<Symbol>] Metadata tags for the example.
# Will be transformed into hash entries with `true` values.
# @param example_implementation [Block] The implementation of the example.
# @yield [Example] the example object
# @example
# $1 do
# end
#
# $1 "does something" do
# end
#
# $1 "does something", :slow, :uses_js do
# end
#
# $1 "does something", :with => 'additional metadata' do
# end
#
# $1 "does something" do |ex|
# # ex is the Example object that contains metadata about the example
# end
def self.define_example_method(name, extra_options={})
idempotently_define_singleton_method(name) do |*all_args, &block|
desc, *args = *all_args
options = Metadata.build_hash_from(args)
options.update(:skip => RSpec::Core::Pending::NOT_YET_IMPLEMENTED) unless block
options.update(extra_options)
examples << RSpec::Core::Example.new(self, desc, options, block)
examples.last
end
end
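# Sketch: with the methods generated below, a call such as
#   it "adds", :fast do ... end
# reduces to roughly
#   examples << Example.new(self, "adds", { :fast => true }, block)
# bare symbols become `true` metadata entries, and a missing block marks
# the example as skipped (NOT_YET_IMPLEMENTED).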
# Defines an example within a group.
define_example_method :example
# Defines an example within a group.
# This is the primary API to define a code example.
define_example_method :it
# Defines an example within a group.
# Useful for when your docstring does not read well off of `it`.
# @example
# RSpec.describe MyClass do
# specify "#do_something is deprecated" do
# # ...
# end
# end
define_example_method :specify
# Shortcut to define an example with `:focus => true`.
# @see example
define_example_method :focus, :focus => true
# Shortcut to define an example with `:focus => true`.
# @see example
define_example_method :fexample, :focus => true
# Shortcut to define an example with `:focus => true`.
# @see example
define_example_method :fit, :focus => true
# Shortcut to define an example with `:focus => true`.
# @see example
define_example_method :fspecify, :focus => true
# Shortcut to define an example with `:skip => 'Temporarily skipped with xexample'`.
# @see example
define_example_method :xexample, :skip => 'Temporarily skipped with xexample'
# Shortcut to define an example with `:skip => 'Temporarily skipped with xit'`.
# @see example
define_example_method :xit, :skip => 'Temporarily skipped with xit'
# Shortcut to define an example with `:skip => 'Temporarily skipped with xspecify'`.
# @see example
define_example_method :xspecify, :skip => 'Temporarily skipped with xspecify'
# Shortcut to define an example with `:skip => true`
# @see example
define_example_method :skip, :skip => true
# Shortcut to define an example with `:pending => true`
# @see example
define_example_method :pending, :pending => true
# @!endgroup
# @!group Defining Example Groups
# @private
# @macro [attach] define_example_group_method
# @!scope class
# @overload $1
# @overload $1(&example_group_definition)
# @param example_group_definition [Block] The definition of the example group.
# @overload $1(doc_string, *metadata_keys, metadata={}, &example_implementation)
# @param doc_string [String] The group's doc string.
# @param metadata [Hash] Metadata for the group.
# @param metadata_keys [Array<Symbol>] Metadata tags for the group.
# Will be transformed into hash entries with `true` values.
# @param example_group_definition [Block] The definition of the example group.
#
# Generates a subclass of this example group which inherits
# everything except the examples themselves.
#
# @example
#
# RSpec.describe "something" do # << This describe method is defined in
# # << RSpec::Core::DSL, included in the
# # << global namespace (optional)
# before do
# do_something_before
# end
#
# let(:thing) { Thing.new }
#
# $1 "attribute (of something)" do
# # examples in the group get the before hook
# # declared above, and can access `thing`
# end
# end
#
# @see DSL#describe
def self.define_example_group_method(name, metadata={})
idempotently_define_singleton_method(name) do |*args, &example_group_block|
thread_data = RSpec.thread_local_metadata
top_level = self == ExampleGroup
if top_level
if thread_data[:in_example_group]
raise "Creating an isolated context from within a context is " \
"not allowed. Change `RSpec.#{name}` to `#{name}` or " \
"move this to a top-level scope."
end
thread_data[:in_example_group] = true
end
begin
description = args.shift
combined_metadata = metadata.dup
combined_metadata.merge!(args.pop) if args.last.is_a? Hash
args << combined_metadata
subclass(self, description, args, &example_group_block).tap do |child|
children << child
end
ensure
thread_data.delete(:in_example_group) if top_level
end
end
RSpec::Core::DSL.expose_example_group_alias(name)
end
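# A rough trace of the methods generated above (hypothetical spec file, for
# illustration only): each nesting level subclasses the enclosing group, so
# hooks, `let` definitions and metadata are inherited.
#
#   RSpec.describe "outer" do    # subclass of RSpec::Core::ExampleGroup
#     context "inner" do         # subclass of the "outer" group's class
#     end
#   end
#
# Calling `RSpec.describe` again from inside a group raises, because the
# thread-local :in_example_group flag is still set while the outer block runs.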
define_example_group_method :example_group
# An alias of `example_group`. Generally used when grouping examples by a
# thing you are describing (e.g. an object, class or method).
# @see example_group
define_example_group_method :describe
# An alias of `example_group`. Generally used when grouping examples
# contextually (e.g. "with xyz", "when xyz" or "if xyz").
# @see example_group
define_example_group_method :context
# Shortcut to temporarily make an example group skipped.
# @see example_group
define_example_group_method :xdescribe, :skip => "Temporarily skipped with xdescribe"
# Shortcut to temporarily make an example group skipped.
# @see example_group
define_example_group_method :xcontext, :skip => "Temporarily skipped with xcontext"
# Shortcut to define an example group with `:focus => true`.
# @see example_group
define_example_group_method :fdescribe, :focus => true
# Shortcut to define an example group with `:focus => true`.
# @see example_group
define_example_group_method :fcontext, :focus => true
# @!endgroup
# @!group Including Shared Example Groups
# @private
# @macro [attach] define_nested_shared_group_method
# @!scope class
#
# @see SharedExampleGroup
def self.define_nested_shared_group_method(new_name, report_label="it should behave like")
idempotently_define_singleton_method(new_name) do |name, *args, &customization_block|
# Pass :caller so the :location metadata is set properly.
# Otherwise, it'll be set to the next line because that's
# the block's source_location.
group = example_group("#{report_label} #{name}", :caller => (the_caller = caller)) do
find_and_eval_shared("examples", name, the_caller.first, *args, &customization_block)
end
group.metadata[:shared_group_name] = name
group
end
end
# Generates a nested example group and includes the shared content
# mapped to `name` in the nested group.
define_nested_shared_group_method :it_behaves_like, "behaves like"
# Generates a nested example group and includes the shared content
# mapped to `name` in the nested group.
define_nested_shared_group_method :it_should_behave_like
# Includes shared content mapped to `name` directly in the group in which
# it is declared, as opposed to `it_behaves_like`, which creates a nested
# group. If given a block, that block is also eval'd in the current
# context.
#
# @see SharedExampleGroup
def self.include_context(name, *args, &block)
find_and_eval_shared("context", name, caller.first, *args, &block)
end
# Includes shared content mapped to `name` directly in the group in which
# it is declared, as opposed to `it_behaves_like`, which creates a nested
# group. If given a block, that block is also eval'd in the current
# context.
#
# @see SharedExampleGroup
def self.include_examples(name, *args, &block)
find_and_eval_shared("examples", name, caller.first, *args, &block)
end
# @private
def self.find_and_eval_shared(label, name, inclusion_location, *args, &customization_block)
shared_block = RSpec.world.shared_example_group_registry.find(parent_groups, name)
unless shared_block
raise ArgumentError, "Could not find shared #{label} #{name.inspect}"
end
SharedExampleGroupInclusionStackFrame.with_frame(name, Metadata.relative_path(inclusion_location)) do
module_exec(*args, &shared_block)
module_exec(&customization_block) if customization_block
end
end
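# Usage sketch for the shared-group plumbing above (assuming a shared group
# was registered elsewhere via `RSpec.shared_examples "a collection"`):
#
#   RSpec.describe Array do
#     it_behaves_like "a collection"   # nested group: "behaves like a collection"
#     include_examples "a collection"  # eval'd directly in this group
#   end
#
# An unknown name raises ArgumentError ("Could not find shared examples ...").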
# @!endgroup
# @private
def self.subclass(parent, description, args, &example_group_block)
subclass = Class.new(parent)
subclass.set_it_up(description, *args, &example_group_block)
ExampleGroups.assign_const(subclass)
subclass.module_exec(&example_group_block) if example_group_block
# The LetDefinitions module must be included _after_ other modules
# to ensure that it takes precedence when there are name collisions.
# Thus, we delay including it until after the example group block
# has been eval'd.
MemoizedHelpers.define_helpers_on(subclass)
subclass
end
# @private
def self.set_it_up(description, *args, &example_group_block)
# Ruby 1.9 has a bug that can lead to infinite recursion and a
# SystemStackError if you include a module in a superclass after
# including it in a subclass: https://gist.github.com/845896
# To prevent this, we must include any modules in
# RSpec::Core::ExampleGroup before users create example groups and have
# a chance to include the same module in a subclass of
# RSpec::Core::ExampleGroup. So we need to configure example groups
# here.
ensure_example_groups_are_configured
user_metadata = Metadata.build_hash_from(args)
@metadata = Metadata::ExampleGroupHash.create(
superclass_metadata, user_metadata, description, *args, &example_group_block
)
hooks.register_globals(self, RSpec.configuration.hooks)
RSpec.configuration.configure_group(self)
end
# @private
def self.examples
@examples ||= []
end
# @private
def self.filtered_examples
RSpec.world.filtered_examples[self]
end
# @private
def self.descendant_filtered_examples
@descendant_filtered_examples ||= filtered_examples +
FlatMap.flat_map(children, &:descendant_filtered_examples)
end
# @private
def self.children
@children ||= []
end
# @private
def self.descendants
@_descendants ||= [self] + FlatMap.flat_map(children, &:descendants)
end
# @private
def self.parent_groups
@parent_groups ||= ancestors.select { |a| a < RSpec::Core::ExampleGroup }
end
# @private
def self.top_level?
@top_level ||= superclass == ExampleGroup
end
# @private
def self.ensure_example_groups_are_configured
unless defined?(@@example_groups_configured)
RSpec.configuration.configure_mock_framework
RSpec.configuration.configure_expectation_framework
# rubocop:disable Style/ClassVars
@@example_groups_configured = true
# rubocop:enable Style/ClassVars
end
end
# @private
def self.before_context_ivars
@before_context_ivars ||= {}
end
# @private
def self.store_before_context_ivars(example_group_instance)
each_instance_variable_for_example(example_group_instance) do |ivar|
before_context_ivars[ivar] = example_group_instance.instance_variable_get(ivar)
end
end
# @private
def self.run_before_context_hooks(example_group_instance)
set_ivars(example_group_instance, superclass_before_context_ivars)
ContextHookMemoizedHash::Before.isolate_for_context_hook(example_group_instance) do
hooks.run(:before, :context, example_group_instance)
end
ensure
store_before_context_ivars(example_group_instance)
end
if RUBY_VERSION.to_f >= 1.9
# @private
def self.superclass_before_context_ivars
superclass.before_context_ivars
end
else # 1.8.7
# @private
def self.superclass_before_context_ivars
if superclass.respond_to?(:before_context_ivars)
superclass.before_context_ivars
else
# `self` must be the singleton class of an ExampleGroup instance.
# On 1.8.7, the superclass of a singleton class of an instance of A
# is A's singleton class. On 1.9+, it's A. On 1.8.7, the first ancestor
# is A, so we can mirror 1.8.7's behavior here. Note that we have to
# search for the first that responds to `before_context_ivars`
# in case a module has been included in the singleton class.
ancestors.find { |a| a.respond_to?(:before_context_ivars) }.before_context_ivars
end
end
end
# @private
def self.run_after_context_hooks(example_group_instance)
set_ivars(example_group_instance, before_context_ivars)
ContextHookMemoizedHash::After.isolate_for_context_hook(example_group_instance) do
hooks.run(:after, :context, example_group_instance)
end
ensure
before_context_ivars.clear
end
# Runs all the examples in this group.
def self.run(reporter=RSpec::Core::NullReporter)
if RSpec.world.wants_to_quit
RSpec.world.clear_remaining_example_groups if top_level?
return
end
reporter.example_group_started(self)
should_run_context_hooks = descendant_filtered_examples.any?
begin
run_before_context_hooks(new('before(:context) hook')) if should_run_context_hooks
result_for_this_group = run_examples(reporter)
results_for_descendants = ordering_strategy.order(children).map { |child| child.run(reporter) }.all?
result_for_this_group && results_for_descendants
rescue Pending::SkipDeclaredInExample => ex
for_filtered_examples(reporter) { |example| example.skip_with_exception(reporter, ex) }
rescue Exception => ex
RSpec.world.wants_to_quit = true if fail_fast?
for_filtered_examples(reporter) { |example| example.fail_with_exception(reporter, ex) }
ensure
run_after_context_hooks(new('after(:context) hook')) if should_run_context_hooks
reporter.example_group_finished(self)
end
end
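# Execution-order sketch for `run` above: before(:context) hooks (only when
# some descendant example survives filtering), then this group's filtered
# examples in the configured order, then each child group recursively, and
# finally after(:context) hooks -- with RSpec.world.wants_to_quit
# short-circuiting the remaining work once fail-fast trips.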
# @private
def self.ordering_strategy
order = metadata.fetch(:order, :global)
registry = RSpec.configuration.ordering_registry
registry.fetch(order) do
warn <<-WARNING.gsub(/^ +\|/, '')
|WARNING: Ignoring unknown ordering specified using `:order => #{order.inspect}` metadata.
| Falling back to configured global ordering.
| Unrecognized ordering specified at: #{location}
WARNING
registry.fetch(:global)
end
end
# @private
def self.run_examples(reporter)
ordering_strategy.order(filtered_examples).map do |example|
next if RSpec.world.wants_to_quit
instance = new(example.inspect_output)
set_ivars(instance, before_context_ivars)
succeeded = example.run(instance, reporter)
RSpec.world.wants_to_quit = true if fail_fast? && !succeeded
succeeded
end.all?
end
# @private
def self.for_filtered_examples(reporter, &block)
filtered_examples.each(&block)
children.each do |child|
reporter.example_group_started(child)
child.for_filtered_examples(reporter, &block)
reporter.example_group_finished(child)
end
false
end
# @private
def self.fail_fast?
RSpec.configuration.fail_fast?
end
# @private
def self.declaration_line_numbers
@declaration_line_numbers ||= [metadata[:line_number]] +
examples.map { |e| e.metadata[:line_number] } +
FlatMap.flat_map(children, &:declaration_line_numbers)
end
# @private
def self.top_level_description
parent_groups.last.description
end
# @private
def self.set_ivars(instance, ivars)
ivars.each { |name, value| instance.instance_variable_set(name, value) }
end
if RUBY_VERSION.to_f < 1.9
# @private
INSTANCE_VARIABLE_TO_IGNORE = '@__inspect_output'.freeze
else
# @private
INSTANCE_VARIABLE_TO_IGNORE = :@__inspect_output
end
# @private
def self.each_instance_variable_for_example(group)
group.instance_variables.each do |ivar|
yield ivar unless ivar == INSTANCE_VARIABLE_TO_IGNORE
end
end
def initialize(inspect_output=nil)
@__inspect_output = inspect_output || '(no description provided)'
end
# @private
def inspect
"#<#{self.class} #{@__inspect_output}>"
end
unless method_defined?(:singleton_class) # for 1.8.7
# @private
def singleton_class
class << self; self; end
end
end
# Raised when an RSpec API is called in the wrong scope, such as `before`
# being called from within an example rather than from within an example
# group block.
WrongScopeError = Class.new(NoMethodError)
def self.method_missing(name, *args)
if method_defined?(name)
raise WrongScopeError,
"`#{name}` is not available on an example group (e.g. a " \
"`describe` or `context` block). It is only available from " \
"within individual examples (e.g. `it` blocks) or from " \
"constructs that run in the scope of an example (e.g. " \
"`before`, `let`, etc)."
end
super
end
private_class_method :method_missing
private
def method_missing(name, *args)
if self.class.respond_to?(name)
raise WrongScopeError,
"`#{name}` is not available from within an example (e.g. an " \
"`it` block) or from constructs that run in the scope of an " \
"example (e.g. `before`, `let`, etc). It is only available " \
"on an example group (e.g. a `describe` or `context` block)."
end
super
end
end
# @private
# Unnamed example group used by `SuiteHookContext`.
class AnonymousExampleGroup < ExampleGroup
def self.metadata
{}
end
end
# Contains information about the inclusion site of a shared example group.
class SharedExampleGroupInclusionStackFrame
# @return [String] the name of the shared example group
attr_reader :shared_group_name
# @return [String] the location where the shared example was included
attr_reader :inclusion_location
def initialize(shared_group_name, inclusion_location)
@shared_group_name = shared_group_name
@inclusion_location = inclusion_location
end
# @return [String] The {#inclusion_location}, formatted for display by a formatter.
def formatted_inclusion_location
@formatted_inclusion_location ||= begin
RSpec.configuration.backtrace_formatter.backtrace_line(
inclusion_location.sub(/(:\d+):in .+$/, '\1')
)
end
end
# @return [String] Description of this stack frame, in the form used by
# RSpec's built-in formatters.
def description
@description ||= "Shared Example Group: #{shared_group_name.inspect} " \
"called from #{formatted_inclusion_location}"
end
# @private
def self.current_backtrace
RSpec.thread_local_metadata[:shared_example_group_inclusions].reverse
end
# @private
def self.with_frame(name, location)
current_stack = RSpec.thread_local_metadata[:shared_example_group_inclusions]
current_stack << new(name, location)
yield
ensure
current_stack.pop
end
end
end
# @private
#
# Namespace for the example group subclasses generated by top-level
# `describe`.
module ExampleGroups
extend Support::RecursiveConstMethods
def self.assign_const(group)
base_name = base_name_for(group)
const_scope = constant_scope_for(group)
name = disambiguate(base_name, const_scope)
const_scope.const_set(name, group)
end
def self.constant_scope_for(group)
const_scope = group.superclass
const_scope = self if const_scope == ::RSpec::Core::ExampleGroup
const_scope
end
def self.base_name_for(group)
return "Anonymous" if group.description.empty?
# Convert to CamelCase.
name = ' ' << group.description
name.gsub!(/[^0-9a-zA-Z]+([0-9a-zA-Z])/) do
match = ::Regexp.last_match[1]
match.upcase!
match
end
name.lstrip! # Remove leading whitespace
name.gsub!(/\W/, ''.freeze) # JRuby, RBX and others don't like non-ascii in const names
# Ruby requires first const letter to be A-Z. Use `Nested`
# as necessary to enforce that.
name.gsub!(/\A([^A-Z]|\z)/, 'Nested\1'.freeze)
name
end
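# Illustrative traces of the naming rules above (ignoring the Ruby 1.9.2
# special case below):
#
#   ""        -> "Anonymous"
#   "a user"  -> "AUser"
#   "#save!"  -> "Save"
#   "2 + 2"   -> "Nested22"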
if RUBY_VERSION == '1.9.2'
class << self
alias _base_name_for base_name_for
def base_name_for(group)
_base_name_for(group) + '_'
end
end
private_class_method :_base_name_for
end
def self.disambiguate(name, const_scope)
return name unless const_defined_on?(const_scope, name)
# Add a trailing number if needed to disambiguate from an existing
# constant.
name << "_2"
name.next! while const_defined_on?(const_scope, name)
name
end
end
end
 | 1 | 14,762 | same thing as above re: commas and args? | rspec-rspec-core | rb |
@@ -224,6 +224,9 @@ public class SolrConfig extends XmlConfigFile implements MapSerializable {
queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1));
queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE);
enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false);
+
+ useCircuitBreakers = getBool("query/useCircuitBreakers", false);
+ memoryCircuitBreakerThreshold = getInt("query/memoryCircuitBreakerThreshold", 100);
useRangeVersionsForPeerSync = getBool("peerSync/useRangeVersions", true);
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPathConstants;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.invoke.MethodHandles;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.google.common.collect.ImmutableList;
import org.apache.commons.io.FileUtils;
import org.apache.lucene.index.IndexDeletionPolicy;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.Version;
import org.apache.solr.client.solrj.io.stream.expr.Expressible;
import org.apache.solr.cloud.RecoveryStrategy;
import org.apache.solr.cloud.ZkSolrResourceLoader;
import org.apache.solr.common.MapSerializable;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.handler.component.SearchComponent;
import org.apache.solr.request.SolrRequestHandler;
import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.transform.TransformerFactory;
import org.apache.solr.rest.RestManager;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.IndexSchemaFactory;
import org.apache.solr.search.CacheConfig;
import org.apache.solr.search.CaffeineCache;
import org.apache.solr.search.QParserPlugin;
import org.apache.solr.search.SolrCache;
import org.apache.solr.search.ValueSourceParser;
import org.apache.solr.search.stats.StatsCache;
import org.apache.solr.servlet.SolrRequestParsers;
import org.apache.solr.spelling.QueryConverter;
import org.apache.solr.update.SolrIndexConfig;
import org.apache.solr.update.UpdateLog;
import org.apache.solr.update.processor.UpdateRequestProcessorChain;
import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
import org.apache.solr.util.DOMUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import static org.apache.solr.common.params.CommonParams.NAME;
import static org.apache.solr.common.params.CommonParams.PATH;
import static org.apache.solr.common.util.Utils.fromJSON;
import static org.apache.solr.common.util.Utils.makeMap;
import static org.apache.solr.core.ConfigOverlay.ZNODEVER;
import static org.apache.solr.core.SolrConfig.PluginOpts.LAZY;
import static org.apache.solr.core.SolrConfig.PluginOpts.MULTI_OK;
import static org.apache.solr.core.SolrConfig.PluginOpts.NOOP;
import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_CLASS;
import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME;
import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME_IN_OVERLAY;
/**
* Provides a static reference to a Config object modeling the main
 * configuration data for a Solr instance -- typically found in
* "solrconfig.xml".
*/
public class SolrConfig extends XmlConfigFile implements MapSerializable {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String DEFAULT_CONF_FILE = "solrconfig.xml";
private RequestParams requestParams;
public enum PluginOpts {
MULTI_OK,
REQUIRE_NAME,
REQUIRE_NAME_IN_OVERLAY,
REQUIRE_CLASS,
LAZY,
// EnumSet.of and/or EnumSet.copyOf(Collection) are annoying
// because of type determination
NOOP
}
private int multipartUploadLimitKB;
private int formUploadLimitKB;
private boolean enableRemoteStreams;
private boolean enableStreamBody;
private boolean handleSelect;
private boolean addHttpRequestToContext;
private final SolrRequestParsers solrRequestParsers;
/**
* TEST-ONLY: Creates a configuration instance from an instance directory and file name
* @param instanceDir the directory used to create the resource loader
* @param name the configuration name used by the loader if the stream is null
*/
public SolrConfig(Path instanceDir, String name)
throws ParserConfigurationException, IOException, SAXException {
this(new SolrResourceLoader(instanceDir), name, true, null);
}
public static SolrConfig readFromResourceLoader(SolrResourceLoader loader, String name, boolean isConfigsetTrusted, Properties substitutableProperties) {
try {
return new SolrConfig(loader, name, isConfigsetTrusted, substitutableProperties);
} catch (Exception e) {
String resource;
if (loader instanceof ZkSolrResourceLoader) {
resource = name;
} else {
resource = Paths.get(loader.getConfigDir()).resolve(name).toString();
}
throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading solr config from " + resource, e);
}
}
/**
   * Creates a configuration instance from a resource loader and a configuration name.
   * The resource loader opens the configuration stream for the given name.
* @param loader the resource loader
* @param name the configuration name
* @param isConfigsetTrusted false if configset was uploaded using unsecured configset upload API, true otherwise
* @param substitutableProperties optional properties to substitute into the XML
*/
private SolrConfig(SolrResourceLoader loader, String name, boolean isConfigsetTrusted, Properties substitutableProperties)
throws ParserConfigurationException, IOException, SAXException {
// insist we have non-null substituteProperties; it might get overlayed
super(loader, name, null, "/config/", substitutableProperties == null ? new Properties() : substitutableProperties);
getOverlay();//just in case it is not initialized
getRequestParams();
initLibs(loader, isConfigsetTrusted);
luceneMatchVersion = SolrConfig.parseLuceneVersionString(getVal(IndexSchema.LUCENE_MATCH_VERSION_PARAM, true));
log.info("Using Lucene MatchVersion: {}", luceneMatchVersion);
String indexConfigPrefix;
    // Old indexDefaults and mainIndex sections are deprecated and fail fast for luceneMatchVersion >= LUCENE_4_0_0.
// For older solrconfig.xml's we allow the old sections, but never mixed with the new <indexConfig>
boolean hasDeprecatedIndexConfig = (getNode("indexDefaults", false) != null) || (getNode("mainIndex", false) != null);
if (hasDeprecatedIndexConfig) {
throw new SolrException(ErrorCode.FORBIDDEN, "<indexDefaults> and <mainIndex> configuration sections are discontinued. Use <indexConfig> instead.");
} else {
indexConfigPrefix = "indexConfig";
}
assertWarnOrFail("The <nrtMode> config has been discontinued and NRT mode is always used by Solr." +
" This config will be removed in future versions.", getNode(indexConfigPrefix + "/nrtMode", false) == null,
true
);
assertWarnOrFail("Solr no longer supports forceful unlocking via the 'unlockOnStartup' option. "+
"This is no longer necessary for the default lockType except in situations where "+
"it would be dangerous and should not be done. For other lockTypes and/or "+
"directoryFactory options it may also be dangerous and users must resolve "+
"problematic locks manually.",
null == getNode(indexConfigPrefix + "/unlockOnStartup", false),
true // 'fail' in trunk
);
// Parse indexConfig section, using mainIndex as backup in case old config is used
indexConfig = new SolrIndexConfig(this, "indexConfig", null);
booleanQueryMaxClauseCount = getInt("query/maxBooleanClauses", IndexSearcher.getMaxClauseCount());
if (IndexSearcher.getMaxClauseCount() < booleanQueryMaxClauseCount) {
log.warn("solrconfig.xml: <maxBooleanClauses> of {} is greater than global limit of {} {}"
, booleanQueryMaxClauseCount, IndexSearcher.getMaxClauseCount()
, "and will have no effect set 'maxBooleanClauses' in solr.xml to increase global limit");
}
// Warn about deprecated / discontinued parameters
// boolToFilterOptimizer has had no effect since 3.1
if (get("query/boolTofilterOptimizer", null) != null)
log.warn("solrconfig.xml: <boolTofilterOptimizer> is currently not implemented and has no effect.");
if (get("query/HashDocSet", null) != null)
log.warn("solrconfig.xml: <HashDocSet> is deprecated and no longer used.");
// TODO: Old code - in case somebody wants to re-enable. Also see SolrIndexSearcher#search()
// filtOptEnabled = getBool("query/boolTofilterOptimizer/@enabled", false);
// filtOptCacheSize = getInt("query/boolTofilterOptimizer/@cacheSize",32);
// filtOptThreshold = getFloat("query/boolTofilterOptimizer/@threshold",.05f);
useFilterForSortedQuery = getBool("query/useFilterForSortedQuery", false);
queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1));
queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE);
enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false);
useRangeVersionsForPeerSync = getBool("peerSync/useRangeVersions", true);
filterCacheConfig = CacheConfig.getConfig(this, "query/filterCache");
queryResultCacheConfig = CacheConfig.getConfig(this, "query/queryResultCache");
documentCacheConfig = CacheConfig.getConfig(this, "query/documentCache");
CacheConfig conf = CacheConfig.getConfig(this, "query/fieldValueCache");
if (conf == null) {
Map<String, String> args = new HashMap<>();
args.put(NAME, "fieldValueCache");
args.put("size", "10000");
args.put("initialSize", "10");
args.put("showItems", "-1");
conf = new CacheConfig(CaffeineCache.class, args, null);
}
fieldValueCacheConfig = conf;
useColdSearcher = getBool("query/useColdSearcher", false);
dataDir = get("dataDir", null);
if (dataDir != null && dataDir.length() == 0) dataDir = null;
org.apache.solr.search.SolrIndexSearcher.initRegenerators(this);
if (get("jmx", null) != null) {
log.warn("solrconfig.xml: <jmx> is no longer supported, use solr.xml:/metrics/reporter section instead");
}
httpCachingConfig = new HttpCachingConfig(this);
maxWarmingSearchers = getInt("query/maxWarmingSearchers", 1);
slowQueryThresholdMillis = getInt("query/slowQueryThresholdMillis", -1);
for (SolrPluginInfo plugin : plugins) loadPluginInfo(plugin);
Map<String, CacheConfig> userCacheConfigs = CacheConfig.getMultipleConfigs(this, "query/cache");
List<PluginInfo> caches = getPluginInfos(SolrCache.class.getName());
if (!caches.isEmpty()) {
for (PluginInfo c : caches) {
userCacheConfigs.put(c.name, CacheConfig.getConfig(this, "cache", c.attributes, null));
}
}
this.userCacheConfigs = Collections.unmodifiableMap(userCacheConfigs);
updateHandlerInfo = loadUpdatehandlerInfo();
multipartUploadLimitKB = getInt(
"requestDispatcher/requestParsers/@multipartUploadLimitInKB", Integer.MAX_VALUE);
if (multipartUploadLimitKB == -1) multipartUploadLimitKB = Integer.MAX_VALUE;
formUploadLimitKB = getInt(
"requestDispatcher/requestParsers/@formdataUploadLimitInKB", Integer.MAX_VALUE);
if (formUploadLimitKB == -1) formUploadLimitKB = Integer.MAX_VALUE;
enableRemoteStreams = getBool(
"requestDispatcher/requestParsers/@enableRemoteStreaming", false);
enableStreamBody = getBool(
"requestDispatcher/requestParsers/@enableStreamBody", false);
handleSelect = getBool(
"requestDispatcher/@handleSelect", false);
addHttpRequestToContext = getBool(
"requestDispatcher/requestParsers/@addHttpRequestToContext", false);
List<PluginInfo> argsInfos = getPluginInfos(InitParams.class.getName());
if (argsInfos != null) {
Map<String, InitParams> argsMap = new HashMap<>();
for (PluginInfo p : argsInfos) {
InitParams args = new InitParams(p);
argsMap.put(args.name == null ? String.valueOf(args.hashCode()) : args.name, args);
}
this.initParams = Collections.unmodifiableMap(argsMap);
}
solrRequestParsers = new SolrRequestParsers(this);
log.debug("Loaded SolrConfig: {}", name);
}
private static final AtomicBoolean versionWarningAlreadyLogged = new AtomicBoolean(false);
public static final Version parseLuceneVersionString(final String matchVersion) {
final Version version;
try {
version = Version.parseLeniently(matchVersion);
} catch (ParseException pe) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Invalid luceneMatchVersion. Should be of the form 'V.V.V' (e.g. 4.8.0)", pe);
}
if (version == Version.LATEST && !versionWarningAlreadyLogged.getAndSet(true)) {
log.warn("You should not use LATEST as luceneMatchVersion property: "
+ "if you use this setting, and then Solr upgrades to a newer release of Lucene, "
+ "sizable changes may happen. If precise back compatibility is important "
+ "then you should instead explicitly specify an actual Lucene version.");
}
return version;
}
public static final List<SolrPluginInfo> plugins = ImmutableList.<SolrPluginInfo>builder()
.add(new SolrPluginInfo(SolrRequestHandler.class, SolrRequestHandler.TYPE, REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY))
.add(new SolrPluginInfo(QParserPlugin.class, "queryParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(Expressible.class, "expressible", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(QueryResponseWriter.class, "queryResponseWriter", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY))
.add(new SolrPluginInfo(ValueSourceParser.class, "valueSourceParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(TransformerFactory.class, "transformer", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(SearchComponent.class, "searchComponent", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(UpdateRequestProcessorFactory.class, "updateProcessor", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
.add(new SolrPluginInfo(SolrCache.class, "cache", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK))
// TODO: WTF is up with queryConverter???
// it apparently *only* works as a singleton? - SOLR-4304
// and even then -- only if there is a single SpellCheckComponent
// because of queryConverter.setIndexAnalyzer
.add(new SolrPluginInfo(QueryConverter.class, "queryConverter", REQUIRE_NAME, REQUIRE_CLASS))
.add(new SolrPluginInfo(PluginBag.RuntimeLib.class, "runtimeLib", REQUIRE_NAME, MULTI_OK))
// this is hackish, since it picks up all SolrEventListeners,
// regardless of when/how/why they are used (or even if they are
// declared outside of the appropriate context) but there's no nice
// way around that in the PluginInfo framework
.add(new SolrPluginInfo(InitParams.class, InitParams.TYPE, MULTI_OK, REQUIRE_NAME_IN_OVERLAY))
.add(new SolrPluginInfo(SolrEventListener.class, "//listener", REQUIRE_CLASS, MULTI_OK, REQUIRE_NAME_IN_OVERLAY))
.add(new SolrPluginInfo(DirectoryFactory.class, "directoryFactory", REQUIRE_CLASS))
.add(new SolrPluginInfo(RecoveryStrategy.Builder.class, "recoveryStrategy"))
.add(new SolrPluginInfo(IndexDeletionPolicy.class, "indexConfig/deletionPolicy", REQUIRE_CLASS))
.add(new SolrPluginInfo(CodecFactory.class, "codecFactory", REQUIRE_CLASS))
.add(new SolrPluginInfo(IndexReaderFactory.class, "indexReaderFactory", REQUIRE_CLASS))
.add(new SolrPluginInfo(UpdateRequestProcessorChain.class, "updateRequestProcessorChain", MULTI_OK))
.add(new SolrPluginInfo(UpdateLog.class, "updateHandler/updateLog"))
.add(new SolrPluginInfo(IndexSchemaFactory.class, "schemaFactory", REQUIRE_CLASS))
.add(new SolrPluginInfo(RestManager.class, "restManager"))
.add(new SolrPluginInfo(StatsCache.class, "statsCache", REQUIRE_CLASS))
.build();
public static final Map<String, SolrPluginInfo> classVsSolrPluginInfo;
static {
Map<String, SolrPluginInfo> map = new HashMap<>();
for (SolrPluginInfo plugin : plugins) map.put(plugin.clazz.getName(), plugin);
classVsSolrPluginInfo = Collections.unmodifiableMap(map);
}
public static class SolrPluginInfo {
@SuppressWarnings({"rawtypes"})
public final Class clazz;
public final String tag;
public final Set<PluginOpts> options;
@SuppressWarnings({"unchecked", "rawtypes"})
private SolrPluginInfo(Class clz, String tag, PluginOpts... opts) {
this.clazz = clz;
this.tag = tag;
this.options = opts == null ? Collections.EMPTY_SET : EnumSet.of(NOOP, opts);
}
public String getCleanTag() {
return tag.replaceAll("/", "");
}
public String getTagCleanLower() {
return getCleanTag().toLowerCase(Locale.ROOT);
}
}
@SuppressWarnings({"unchecked", "rawtypes"})
public static ConfigOverlay getConfigOverlay(SolrResourceLoader loader) {
InputStream in = null;
InputStreamReader isr = null;
try {
try {
in = loader.openResource(ConfigOverlay.RESOURCE_NAME);
} catch (IOException e) {
// TODO: we should be explicitly looking for file not found exceptions
// and logging if it's not the expected IOException
// hopefully no problem, assume no overlay.json file
return new ConfigOverlay(Collections.EMPTY_MAP, -1);
}
int version = 0; // will be always 0 for file based resourceLoader
if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) {
version = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion();
log.debug("Config overlay loaded. version : {} ", version);
}
Map m = (Map) fromJSON(in);
return new ConfigOverlay(m, version);
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading config overlay", e);
} finally {
IOUtils.closeQuietly(isr);
IOUtils.closeQuietly(in);
}
}
private Map<String, InitParams> initParams = Collections.emptyMap();
public Map<String, InitParams> getInitParams() {
return initParams;
}
protected UpdateHandlerInfo loadUpdatehandlerInfo() {
return new UpdateHandlerInfo(get("updateHandler/@class", null),
getInt("updateHandler/autoCommit/maxDocs", -1),
getInt("updateHandler/autoCommit/maxTime", -1),
convertHeapOptionStyleConfigStringToBytes(get("updateHandler/autoCommit/maxSize", "")),
getBool("updateHandler/indexWriter/closeWaitsForMerges", true),
getBool("updateHandler/autoCommit/openSearcher", true),
getInt("updateHandler/autoSoftCommit/maxDocs", -1),
getInt("updateHandler/autoSoftCommit/maxTime", -1),
getBool("updateHandler/commitWithin/softCommit", true));
}
/**
* Converts a Java heap option-like config string to bytes. Valid suffixes are: 'k', 'm', 'g'
* (case insensitive). If there is no suffix, the default unit is bytes.
* For example, 50k = 50KB, 20m = 20MB, 4g = 4GB, 300 = 300 bytes
* @param configStr the config setting to parse
* @return the size, in bytes. -1 if the given config string is empty
*/
protected static long convertHeapOptionStyleConfigStringToBytes(String configStr) {
if (configStr.isEmpty()) {
return -1;
}
long multiplier = 1;
String numericValueStr = configStr;
char suffix = Character.toLowerCase(configStr.charAt(configStr.length() - 1));
if (Character.isLetter(suffix)) {
if (suffix == 'k') {
multiplier = FileUtils.ONE_KB;
}
else if (suffix == 'm') {
multiplier = FileUtils.ONE_MB;
}
else if (suffix == 'g') {
multiplier = FileUtils.ONE_GB;
} else {
throw new RuntimeException("Invalid suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). "
+ "No suffix means the amount is in bytes. ");
}
numericValueStr = configStr.substring(0, configStr.length() - 1);
}
try {
return Long.parseLong(numericValueStr) * multiplier;
} catch (NumberFormatException e) {
throw new RuntimeException("Invalid format. The config setting should be a long with an "
+ "optional letter suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). "
+ "No suffix means the amount is in bytes.");
}
}
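  // Illustrative values for the parser above (commons-io constants:
  // ONE_KB = 1024, ONE_MB = 1024^2, ONE_GB = 1024^3):
  //   ""    -> -1           (unset)
  //   "300" -> 300          (plain bytes)
  //   "50k" -> 51200
  //   "20m" -> 20971520
  //   "4g"  -> 4294967296
  //   "4x"  -> RuntimeException (invalid suffix)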
private void loadPluginInfo(SolrPluginInfo pluginInfo) {
boolean requireName = pluginInfo.options.contains(REQUIRE_NAME);
boolean requireClass = pluginInfo.options.contains(REQUIRE_CLASS);
List<PluginInfo> result = readPluginInfos(pluginInfo.tag, requireName, requireClass);
if (1 < result.size() && !pluginInfo.options.contains(MULTI_OK)) {
throw new SolrException
(SolrException.ErrorCode.SERVER_ERROR,
"Found " + result.size() + " configuration sections when at most "
+ "1 is allowed matching expression: " + pluginInfo.getCleanTag());
}
if (!result.isEmpty()) pluginStore.put(pluginInfo.clazz.getName(), result);
}
public List<PluginInfo> readPluginInfos(String tag, boolean requireName, boolean requireClass) {
ArrayList<PluginInfo> result = new ArrayList<>();
NodeList nodes = (NodeList) evaluate(tag, XPathConstants.NODESET);
for (int i = 0; i < nodes.getLength(); i++) {
PluginInfo pluginInfo = new PluginInfo(nodes.item(i), "[solrconfig.xml] " + tag, requireName, requireClass);
if (pluginInfo.isEnabled()) result.add(pluginInfo);
}
return result;
}
public SolrRequestParsers getRequestParsers() {
return solrRequestParsers;
}
/* The set of materialized parameters: */
public final int booleanQueryMaxClauseCount;
// SolrIndexSearcher - nutch optimizer -- Disabled since 3.1
// public final boolean filtOptEnabled;
// public final int filtOptCacheSize;
// public final float filtOptThreshold;
// SolrIndexSearcher - caches configurations
public final CacheConfig filterCacheConfig;
public final CacheConfig queryResultCacheConfig;
public final CacheConfig documentCacheConfig;
public final CacheConfig fieldValueCacheConfig;
public final Map<String, CacheConfig> userCacheConfigs;
// SolrIndexSearcher - more...
public final boolean useFilterForSortedQuery;
public final int queryResultWindowSize;
public final int queryResultMaxDocsCached;
public final boolean enableLazyFieldLoading;
public final boolean useRangeVersionsForPeerSync;
// IndexConfig settings
public final SolrIndexConfig indexConfig;
protected UpdateHandlerInfo updateHandlerInfo;
private Map<String, List<PluginInfo>> pluginStore = new LinkedHashMap<>();
public final int maxWarmingSearchers;
public final boolean useColdSearcher;
public final Version luceneMatchVersion;
protected String dataDir;
public final int slowQueryThresholdMillis; // threshold above which a query is considered slow
private final HttpCachingConfig httpCachingConfig;
public HttpCachingConfig getHttpCachingConfig() {
return httpCachingConfig;
}
public static class HttpCachingConfig implements MapSerializable {
/**
* config xpath prefix for getting HTTP Caching options
*/
private final static String CACHE_PRE
= "requestDispatcher/httpCaching/";
/**
* For extracting Expires "ttl" from <cacheControl> config
*/
private final static Pattern MAX_AGE
= Pattern.compile("\\bmax-age=(\\d+)");
@Override
public Map<String, Object> toMap(Map<String, Object> map) {
return makeMap("never304", never304,
"etagSeed", etagSeed,
"lastModFrom", lastModFrom.name().toLowerCase(Locale.ROOT),
"cacheControl", cacheControlHeader);
}
public static enum LastModFrom {
OPENTIME, DIRLASTMOD, BOGUS;
/**
* Input must not be null
*/
public static LastModFrom parse(final String s) {
try {
return valueOf(s.toUpperCase(Locale.ROOT));
} catch (Exception e) {
log.warn("Unrecognized value for lastModFrom: {}", s, e);
return BOGUS;
}
}
}
private final boolean never304;
private final String etagSeed;
private final String cacheControlHeader;
private final Long maxAge;
private final LastModFrom lastModFrom;
private HttpCachingConfig(SolrConfig conf) {
never304 = conf.getBool(CACHE_PRE + "@never304", false);
etagSeed = conf.get(CACHE_PRE + "@etagSeed", "Solr");
lastModFrom = LastModFrom.parse(conf.get(CACHE_PRE + "@lastModFrom",
"openTime"));
cacheControlHeader = conf.get(CACHE_PRE + "cacheControl", null);
Long tmp = null; // maxAge
if (null != cacheControlHeader) {
try {
final Matcher ttlMatcher = MAX_AGE.matcher(cacheControlHeader);
final String ttlStr = ttlMatcher.find() ? ttlMatcher.group(1) : null;
tmp = (null != ttlStr && !"".equals(ttlStr))
? Long.valueOf(ttlStr)
: null;
} catch (Exception e) {
log.warn("Ignoring exception while attempting to extract max-age from cacheControl config: {}"
, cacheControlHeader, e);
}
}
maxAge = tmp;
}
public boolean isNever304() {
return never304;
}
public String getEtagSeed() {
return etagSeed;
}
/**
* null if no Cache-Control header
*/
public String getCacheControlHeader() {
return cacheControlHeader;
}
/**
* null if no max age limitation
*/
public Long getMaxAge() {
return maxAge;
}
public LastModFrom getLastModFrom() {
return lastModFrom;
}
}
public static class UpdateHandlerInfo implements MapSerializable {
public final String className;
public final int autoCommmitMaxDocs, autoCommmitMaxTime,
autoSoftCommmitMaxDocs, autoSoftCommmitMaxTime;
public final long autoCommitMaxSizeBytes;
public final boolean indexWriterCloseWaitsForMerges;
public final boolean openSearcher; // is opening a new searcher part of hard autocommit?
public final boolean commitWithinSoftCommit;
/**
   * @param autoCommmitMaxDocs -1 (the default) disables the doc-count trigger
   * @param autoCommmitMaxTime -1 (the default) disables the time trigger
   * @param autoCommitMaxSize -1 (the default) disables the size trigger
*/
public UpdateHandlerInfo(String className, int autoCommmitMaxDocs, int autoCommmitMaxTime, long autoCommitMaxSize, boolean indexWriterCloseWaitsForMerges, boolean openSearcher,
int autoSoftCommmitMaxDocs, int autoSoftCommmitMaxTime, boolean commitWithinSoftCommit) {
this.className = className;
this.autoCommmitMaxDocs = autoCommmitMaxDocs;
this.autoCommmitMaxTime = autoCommmitMaxTime;
this.autoCommitMaxSizeBytes = autoCommitMaxSize;
this.indexWriterCloseWaitsForMerges = indexWriterCloseWaitsForMerges;
this.openSearcher = openSearcher;
this.autoSoftCommmitMaxDocs = autoSoftCommmitMaxDocs;
this.autoSoftCommmitMaxTime = autoSoftCommmitMaxTime;
this.commitWithinSoftCommit = commitWithinSoftCommit;
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public Map<String, Object> toMap(Map<String, Object> map) {
LinkedHashMap result = new LinkedHashMap();
result.put("indexWriter", makeMap("closeWaitsForMerges", indexWriterCloseWaitsForMerges));
result.put("commitWithin", makeMap("softCommit", commitWithinSoftCommit));
result.put("autoCommit", makeMap(
"maxDocs", autoCommmitMaxDocs,
"maxTime", autoCommmitMaxTime,
"openSearcher", openSearcher
));
result.put("autoSoftCommit",
makeMap("maxDocs", autoSoftCommmitMaxDocs,
"maxTime", autoSoftCommmitMaxTime));
return result;
}
}
// public Map<String, List<PluginInfo>> getUpdateProcessorChainInfo() { return updateProcessorChainInfo; }
public UpdateHandlerInfo getUpdateHandlerInfo() {
return updateHandlerInfo;
}
public String getDataDir() {
return dataDir;
}
/**
* SolrConfig keeps a repository of plugins by the type. The known interfaces are the types.
*
   * @param type The key is the FQN of the plugin class. There are a few known types: SolrFormatter, SolrFragmenter,
   *             SolrRequestHandler, QParserPlugin, QueryResponseWriter, ValueSourceParser,
* SearchComponent, QueryConverter, SolrEventListener, DirectoryFactory,
* IndexDeletionPolicy, IndexReaderFactory, {@link TransformerFactory}
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public List<PluginInfo> getPluginInfos(String type) {
List<PluginInfo> result = pluginStore.get(type);
SolrPluginInfo info = classVsSolrPluginInfo.get(type);
if (info != null &&
(info.options.contains(REQUIRE_NAME) || info.options.contains(REQUIRE_NAME_IN_OVERLAY))) {
Map<String, Map> infos = overlay.getNamedPlugins(info.getCleanTag());
if (!infos.isEmpty()) {
LinkedHashMap<String, PluginInfo> map = new LinkedHashMap<>();
if (result != null) for (PluginInfo pluginInfo : result) {
//just create a UUID for the time being so that map key is not null
String name = pluginInfo.name == null ?
UUID.randomUUID().toString().toLowerCase(Locale.ROOT) :
pluginInfo.name;
map.put(name, pluginInfo);
}
for (Map.Entry<String, Map> e : infos.entrySet()) {
map.put(e.getKey(), new PluginInfo(info.getCleanTag(), e.getValue()));
}
result = new ArrayList<>(map.values());
}
}
return result == null ? Collections.<PluginInfo>emptyList() : result;
}
public PluginInfo getPluginInfo(String type) {
List<PluginInfo> result = pluginStore.get(type);
if (result == null || result.isEmpty()) {
return null;
}
if (1 == result.size()) {
return result.get(0);
}
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Multiple plugins configured for type: " + type);
}
private void initLibs(SolrResourceLoader loader, boolean isConfigsetTrusted) {
// TODO Want to remove SolrResourceLoader.getInstancePath; it can be on a Standalone subclass.
// For Zk subclass, it's needed for the time being as well. We could remove that one if we remove two things
// in SolrCloud: (1) instancePath/lib and (2) solrconfig lib directives with relative paths. Can wait till 9.0.
Path instancePath = loader.getInstancePath();
List<URL> urls = new ArrayList<>();
Path libPath = instancePath.resolve("lib");
if (Files.exists(libPath)) {
try {
urls.addAll(SolrResourceLoader.getURLs(libPath));
} catch (IOException e) {
log.warn("Couldn't add files from {} to classpath: {}", libPath, e);
}
}
NodeList nodes = (NodeList) evaluate("lib", XPathConstants.NODESET);
if (nodes == null || nodes.getLength() == 0) return;
if (!isConfigsetTrusted) {
throw new SolrException(ErrorCode.UNAUTHORIZED, "The configset for this collection was uploaded without any authentication in place,"
+ " and use of <lib> is not available for collections with untrusted configsets. To use this component, re-upload the configset"
+ " after enabling authentication and authorization.");
}
for (int i = 0; i < nodes.getLength(); i++) {
Node node = nodes.item(i);
String baseDir = DOMUtil.getAttr(node, "dir");
String path = DOMUtil.getAttr(node, PATH);
if (null != baseDir) {
// :TODO: add support for a simpler 'glob' mutually exclusive of regex
Path dir = instancePath.resolve(baseDir);
String regex = DOMUtil.getAttr(node, "regex");
try {
if (regex == null)
urls.addAll(SolrResourceLoader.getURLs(dir));
else
urls.addAll(SolrResourceLoader.getFilteredURLs(dir, regex));
} catch (IOException e) {
log.warn("Couldn't add files from {} filtered by {} to classpath: {}", dir, regex, e);
}
} else if (null != path) {
final Path dir = instancePath.resolve(path);
try {
urls.add(dir.toUri().toURL());
} catch (MalformedURLException e) {
log.warn("Couldn't add file {} to classpath: {}", dir, e);
}
} else {
throw new RuntimeException("lib: missing mandatory attributes: 'dir' or 'path'");
}
}
loader.addToClassLoader(urls);
loader.reloadLuceneSPI();
}
public int getMultipartUploadLimitKB() {
return multipartUploadLimitKB;
}
public int getFormUploadLimitKB() {
return formUploadLimitKB;
}
public boolean isHandleSelect() {
return handleSelect;
}
public boolean isAddHttpRequestToContext() {
return addHttpRequestToContext;
}
public boolean isEnableRemoteStreams() {
return enableRemoteStreams;
}
public boolean isEnableStreamBody() {
return enableStreamBody;
}
@Override
public int getInt(String path) {
return getInt(path, 0);
}
@Override
public int getInt(String path, int def) {
Object val = overlay.getXPathProperty(path);
if (val != null) return Integer.parseInt(val.toString());
return super.getInt(path, def);
}
@Override
public boolean getBool(String path, boolean def) {
Object val = overlay.getXPathProperty(path);
if (val != null) return Boolean.parseBoolean(val.toString());
return super.getBool(path, def);
}
@Override
public String get(String path) {
Object val = overlay.getXPathProperty(path, true);
return val != null ? val.toString() : super.get(path);
}
@Override
public String get(String path, String def) {
Object val = overlay.getXPathProperty(path, true);
return val != null ? val.toString() : super.get(path, def);
}
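  // The overrides above give config-overlay values precedence over the parsed
  // XML: each getter first consults overlay.getXPathProperty(path) and only
  // falls back to super (the solrconfig.xml value or the supplied default).
  // So a property edited at runtime through the overlay wins on the next read
  // without the XML file changing.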
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public Map<String, Object> toMap(Map<String, Object> result) {
if (getZnodeVersion() > -1) result.put(ZNODEVER, getZnodeVersion());
result.put(IndexSchema.LUCENE_MATCH_VERSION_PARAM, luceneMatchVersion);
result.put("updateHandler", getUpdateHandlerInfo());
Map m = new LinkedHashMap();
result.put("query", m);
m.put("useFilterForSortedQuery", useFilterForSortedQuery);
m.put("queryResultWindowSize", queryResultWindowSize);
m.put("queryResultMaxDocsCached", queryResultMaxDocsCached);
m.put("enableLazyFieldLoading", enableLazyFieldLoading);
m.put("maxBooleanClauses", booleanQueryMaxClauseCount);
for (SolrPluginInfo plugin : plugins) {
List<PluginInfo> infos = getPluginInfos(plugin.clazz.getName());
if (infos == null || infos.isEmpty()) continue;
String tag = plugin.getCleanTag();
tag = tag.replace("/", "");
if (plugin.options.contains(PluginOpts.REQUIRE_NAME)) {
LinkedHashMap items = new LinkedHashMap();
for (PluginInfo info : infos) {
//TODO remove after fixing https://issues.apache.org/jira/browse/SOLR-13706
if (info.type.equals("searchComponent") && info.name.equals("highlight")) continue;
items.put(info.name, info);
}
for (Map.Entry e : overlay.getNamedPlugins(plugin.tag).entrySet()) items.put(e.getKey(), e.getValue());
result.put(tag, items);
} else {
if (plugin.options.contains(MULTI_OK)) {
ArrayList<MapSerializable> l = new ArrayList<>();
for (PluginInfo info : infos) l.add(info);
result.put(tag, l);
} else {
result.put(tag, infos.get(0));
}
}
}
addCacheConfig(m, filterCacheConfig, queryResultCacheConfig, documentCacheConfig, fieldValueCacheConfig);
m = new LinkedHashMap();
result.put("requestDispatcher", m);
m.put("handleSelect", handleSelect);
if (httpCachingConfig != null) m.put("httpCaching", httpCachingConfig);
m.put("requestParsers", makeMap("multipartUploadLimitKB", multipartUploadLimitKB,
"formUploadLimitKB", formUploadLimitKB,
"addHttpRequestToContext", addHttpRequestToContext));
if (indexConfig != null) result.put("indexConfig", indexConfig);
m = new LinkedHashMap();
result.put("peerSync", m);
m.put("useRangeVersions", useRangeVersionsForPeerSync);
//TODO there is more to add
return result;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private void addCacheConfig(Map queryMap, CacheConfig... cache) {
if (cache == null) return;
for (CacheConfig config : cache) if (config != null) queryMap.put(config.getNodeName(), config);
}
@Override
public Properties getSubstituteProperties() {
Map<String, Object> p = getOverlay().getUserProps();
if (p == null || p.isEmpty()) return super.getSubstituteProperties();
Properties result = new Properties(super.getSubstituteProperties());
result.putAll(p);
return result;
}
private ConfigOverlay overlay;
public ConfigOverlay getOverlay() {
if (overlay == null) {
overlay = getConfigOverlay(getResourceLoader());
}
return overlay;
}
public RequestParams getRequestParams() {
if (requestParams == null) {
return refreshRequestParams();
}
return requestParams;
}
public RequestParams refreshRequestParams() {
requestParams = RequestParams.getFreshRequestParams(getResourceLoader(), requestParams);
if (log.isDebugEnabled()) {
log.debug("current version of requestparams : {}", requestParams.getZnodeVersion());
}
return requestParams;
}
}
| 1 | 34,985 | Should we validate that this is between 0 and 100? | apache-lucene-solr | java |
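A minimal validation sketch for the review question above (hypothetical, not part of the patch; the 0-100 range suggests the threshold is a percentage): reject out-of-range values when parsing in the SolrConfig constructor.

    memoryCircuitBreakerThreshold = getInt("query/memoryCircuitBreakerThreshold", 100);
    if (memoryCircuitBreakerThreshold < 0 || memoryCircuitBreakerThreshold > 100) {
      throw new SolrException(ErrorCode.SERVER_ERROR,
          "memoryCircuitBreakerThreshold must be between 0 and 100");
    }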
@@ -332,7 +332,7 @@ func (c *cliApp) help() {
// quit stops cli and client commands and exits application
func (c *cliApp) quit() {
- stop := utils.SoftKiller(c.Kill)
+ stop := utils.HardKiller(c.Kill)
stop()
}
| 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package cli
import (
"fmt"
"log"
"path/filepath"
"strconv"
"strings"
"github.com/chzyer/readline"
"github.com/mysteriumnetwork/node/cmd"
"github.com/mysteriumnetwork/node/metadata"
tequilapi_client "github.com/mysteriumnetwork/node/tequilapi/client"
"github.com/mysteriumnetwork/node/tequilapi/endpoints"
"github.com/mysteriumnetwork/node/utils"
"github.com/urfave/cli"
)
const cliCommandName = "cli"
// NewCommand constructs a CLI-based Mysterium UI with the ability to control quitting
func NewCommand() *cli.Command {
var di cmd.Dependencies
return &cli.Command{
Name: cliCommandName,
Usage: "Starts a CLI client with a Tequilapi",
Action: func(ctx *cli.Context) error {
errorChannel := make(chan error)
if err := di.Bootstrap(cmd.ParseFlagsNode(ctx)); err != nil {
return err
}
go func() { errorChannel <- di.Node.Wait() }()
nodeOptions := cmd.ParseFlagsNode(ctx)
cmdCLI := &cliApp{
historyFile: filepath.Join(nodeOptions.Directories.Data, ".cli_history"),
tequilapi: tequilapi_client.NewClient(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort),
}
go func() { errorChannel <- cmdCLI.Run() }()
cmd.RegisterSignalCallback(utils.SoftKiller(cmdCLI.Kill))
cmd.RegisterSignalCallback(utils.SoftKiller(di.Shutdown))
return <-errorChannel
},
After: func(ctx *cli.Context) error {
return di.Shutdown()
},
}
}
// cliApp describes the CLI-based Mysterium UI
type cliApp struct {
historyFile string
tequilapi *tequilapi_client.Client
fetchedProposals []tequilapi_client.ProposalDTO
completer *readline.PrefixCompleter
reader *readline.Instance
}
const redColor = "\033[31m%s\033[0m"
const identityDefaultPassphrase = ""
const statusConnected = "Connected"
var versionSummary = metadata.VersionAsSummary(metadata.LicenseCopyright(
"type 'license warranty'",
"type 'license conditions'",
))
// Run runs the CLI interface synchronously, blocking the calling thread
func (c *cliApp) Run() (err error) {
fmt.Println(versionSummary)
c.fetchedProposals = c.fetchProposals()
c.completer = newAutocompleter(c.tequilapi, c.fetchedProposals)
c.reader, err = readline.NewEx(&readline.Config{
Prompt: fmt.Sprintf(redColor, "» "),
HistoryFile: c.historyFile,
AutoComplete: c.completer,
InterruptPrompt: "^C",
EOFPrompt: "exit",
})
if err != nil {
return err
}
	// TODO Should take over the output of CommandRun
log.SetOutput(c.reader.Stderr())
for {
line, err := c.reader.Readline()
if err == readline.ErrInterrupt && len(line) > 0 {
continue
} else if err != nil {
c.quit()
return err
}
c.handleActions(line)
}
}
// Kill stops cli
func (c *cliApp) Kill() error {
c.reader.Clean()
return c.reader.Close()
}
func (c *cliApp) handleActions(line string) {
line = strings.TrimSpace(line)
staticCmds := []struct {
command string
handler func()
}{
{"exit", c.quit},
{"quit", c.quit},
{"help", c.help},
{"status", c.status},
{"proposals", c.proposals},
{"healthcheck", c.healthcheck},
{"ip", c.ip},
{"disconnect", c.disconnect},
{"stop", c.stopClient},
}
argCmds := []struct {
command string
handler func(argsString string)
}{
{command: "connect", handler: c.connect},
{command: "unlock", handler: c.unlock},
{command: "identities", handler: c.identities},
{command: "version", handler: c.version},
{command: "license", handler: c.license},
{command: "registration", handler: c.registration},
}
for _, cmd := range staticCmds {
if line == cmd.command {
cmd.handler()
return
}
}
for _, cmd := range argCmds {
if strings.HasPrefix(line, cmd.command) {
argsString := strings.TrimSpace(line[len(cmd.command):])
cmd.handler(argsString)
return
}
}
if len(line) > 0 {
c.help()
}
}
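// Dispatch sketch for handleActions above (illustrative input): a bare
// "status" matches the static command table and calls c.status(), while
// "connect 0xCONSUMER 0xPROVIDER true" matches the "connect" prefix and
// invokes c.connect("0xCONSUMER 0xPROVIDER true") with the trimmed
// remainder of the line.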
func (c *cliApp) connect(argsString string) {
options := strings.Fields(argsString)
if len(options) < 2 {
info("Please type in the provider identity. Connect <consumer-identity> <provider-identity> [disable-kill-switch]")
return
}
consumerID, providerID := options[0], options[1]
var disableKill bool
var err error
if len(options) > 2 {
disableKillStr := options[2]
disableKill, err = strconv.ParseBool(disableKillStr)
if err != nil {
info("Please use true / false for <disable-kill-switch>")
return
}
}
connectOptions := endpoints.ConnectOptions{DisableKillSwitch: disableKill}
if consumerID == "new" {
id, err := c.tequilapi.NewIdentity(identityDefaultPassphrase)
if err != nil {
warn(err)
return
}
consumerID = id.Address
success("New identity created:", consumerID)
}
status("CONNECTING", "from:", consumerID, "to:", providerID)
_, err = c.tequilapi.Connect(consumerID, providerID, connectOptions)
if err != nil {
warn(err)
return
}
success("Connected.")
}
func (c *cliApp) unlock(argsString string) {
unlockSignature := "Unlock <identity> [passphrase]"
if len(argsString) == 0 {
info("Press tab to select identity.", unlockSignature)
return
}
args := strings.Fields(argsString)
var identity, passphrase string
if len(args) == 1 {
identity, passphrase = args[0], ""
} else if len(args) == 2 {
identity, passphrase = args[0], args[1]
} else {
info("Please type in identity and optional passphrase.", unlockSignature)
return
}
info("Unlocking", identity)
err := c.tequilapi.Unlock(identity, passphrase)
if err != nil {
warn(err)
return
}
success(fmt.Sprintf("Identity %s unlocked.", identity))
}
func (c *cliApp) disconnect() {
err := c.tequilapi.Disconnect()
if err != nil {
warn(err)
return
}
success("Disconnected.")
}
func (c *cliApp) status() {
status, err := c.tequilapi.Status()
if err != nil {
warn(err)
} else {
info("Status:", status.Status)
info("SID:", status.SessionID)
}
if status.Status == statusConnected {
statistics, err := c.tequilapi.ConnectionStatistics()
if err != nil {
warn(err)
} else {
info(fmt.Sprintf("Connection duration: %ds", statistics.Duration))
info("Bytes sent:", statistics.BytesSent)
info("Bytes received:", statistics.BytesReceived)
}
}
}
func (c *cliApp) healthcheck() {
healthcheck, err := c.tequilapi.Healthcheck()
if err != nil {
warn(err)
return
}
info(fmt.Sprintf("Uptime: %v", healthcheck.Uptime))
info(fmt.Sprintf("Process: %v", healthcheck.Process))
info(fmt.Sprintf("Version: %v", healthcheck.Version))
buildString := metadata.FormatString(healthcheck.BuildInfo.Commit, healthcheck.BuildInfo.Branch, healthcheck.BuildInfo.BuildNumber)
info(buildString)
}
func (c *cliApp) proposals() {
proposals := c.fetchProposals()
c.fetchedProposals = proposals
info(fmt.Sprintf("Found %v proposals", len(proposals)))
for _, proposal := range proposals {
country := proposal.ServiceDefinition.LocationOriginate.Country
if country == "" {
country = "Unknown"
}
msg := fmt.Sprintf("- provider id: %v, proposal id: %v, country: %v", proposal.ProviderID, proposal.ID, country)
info(msg)
}
}
func (c *cliApp) fetchProposals() []tequilapi_client.ProposalDTO {
proposals, err := c.tequilapi.Proposals()
if err != nil {
warn(err)
return []tequilapi_client.ProposalDTO{}
}
return proposals
}
func (c *cliApp) ip() {
ip, err := c.tequilapi.GetIP()
if err != nil {
warn(err)
return
}
info("IP:", ip)
}
func (c *cliApp) help() {
info("Mysterium CLI tequilapi commands:")
fmt.Println(c.completer.Tree(" "))
}
// quit stops cli and client commands and exits application
func (c *cliApp) quit() {
stop := utils.SoftKiller(c.Kill)
stop()
}
func (c *cliApp) identities(argsString string) {
const usage = "identities command:\n list\n new [passphrase]"
if len(argsString) == 0 {
info(usage)
return
}
args := strings.Fields(argsString)
if len(args) < 1 {
info(usage)
return
}
action := args[0]
if action == "list" {
if len(args) > 1 {
info(usage)
return
}
ids, err := c.tequilapi.GetIdentities()
if err != nil {
fmt.Println("Error occurred:", err)
return
}
for _, id := range ids {
status("+", id.Address)
}
return
}
if action == "new" {
var passphrase string
if len(args) == 1 {
passphrase = identityDefaultPassphrase
} else if len(args) == 2 {
passphrase = args[1]
} else {
info(usage)
return
}
id, err := c.tequilapi.NewIdentity(passphrase)
if err != nil {
warn(err)
return
}
success("New identity created:", id.Address)
}
}
func (c *cliApp) registration(argsString string) {
status, err := c.tequilapi.IdentityRegistrationStatus(argsString)
if err != nil {
warn("Something went wrong: ", err)
return
}
if status.Registered {
info("Already registered")
return
}
info("Identity is not registered yet. In order to do that - please call payments contract with the following data")
info("Public key: part1 ->", status.PublicKey.Part1)
info(" part2 ->", status.PublicKey.Part2)
info("Signature: S ->", status.Signature.S)
info(" R ->", status.Signature.R)
info(" V ->", status.Signature.V)
info("OR proceed with direct link:")
infof(" https://wallet.mysterium.network/?part1=%s&part2=%s&s=%s&r=%s&v=%d\n",
status.PublicKey.Part1,
status.PublicKey.Part2,
status.Signature.S,
status.Signature.R,
status.Signature.V)
}
func (c *cliApp) stopClient() {
err := c.tequilapi.Stop()
if err != nil {
warn("Cannot stop client:", err)
}
success("Client stopped")
}
func (c *cliApp) version(argsString string) {
fmt.Println(versionSummary)
}
func (c *cliApp) license(argsString string) {
if argsString == "warranty" {
fmt.Print(metadata.LicenseWarranty)
} else if argsString == "conditions" {
fmt.Print(metadata.LicenseConditions)
} else {
info("identities command:\n warranty\n conditions")
}
}
func getIdentityOptionList(tequilapi *tequilapi_client.Client) func(string) []string {
return func(line string) []string {
identities := []string{"new"}
ids, err := tequilapi.GetIdentities()
if err != nil {
warn(err)
return identities
}
for _, id := range ids {
identities = append(identities, id.Address)
}
return identities
}
}
func getProposalOptionList(proposals []tequilapi_client.ProposalDTO) func(string) []string {
return func(line string) []string {
var providerIDS []string
for _, proposal := range proposals {
providerIDS = append(providerIDS, proposal.ProviderID)
}
return providerIDS
}
}
func newAutocompleter(tequilapi *tequilapi_client.Client, proposals []tequilapi_client.ProposalDTO) *readline.PrefixCompleter {
return readline.NewPrefixCompleter(
readline.PcItem(
"connect",
readline.PcItemDynamic(
getIdentityOptionList(tequilapi),
readline.PcItemDynamic(
getProposalOptionList(proposals),
),
),
),
readline.PcItem(
"identities",
readline.PcItem("new"),
readline.PcItem("list"),
),
readline.PcItem("status"),
readline.PcItem("healthcheck"),
readline.PcItem("proposals"),
readline.PcItem("ip"),
readline.PcItem("disconnect"),
readline.PcItem("help"),
readline.PcItem("quit"),
readline.PcItem("stop"),
readline.PcItem(
"unlock",
readline.PcItemDynamic(
getIdentityOptionList(tequilapi),
),
),
readline.PcItem(
"license",
readline.PcItem("warranty"),
readline.PcItem("conditions"),
),
readline.PcItem(
"registration",
readline.PcItemDynamic(
getIdentityOptionList(tequilapi),
),
),
)
}
| 1 | 12,367 | `HardKiller` doing `os.Exit()` without proper shutting down other dependencies. I think there should be a better approach for this. | mysteriumnetwork-node | go |
@@ -136,16 +136,12 @@ const struct wlr_gles2_pixel_format *get_gles2_format_from_gl(
return NULL;
}
-const uint32_t *get_gles2_shm_formats(const struct wlr_gles2_renderer *renderer,
- size_t *len) {
- static uint32_t shm_formats[sizeof(formats) / sizeof(formats[0])];
- size_t j = 0;
+void init_gles2_data_ptr_formats(struct wlr_gles2_renderer *renderer) {
for (size_t i = 0; i < sizeof(formats) / sizeof(formats[0]); i++) {
if (!is_gles2_pixel_format_supported(renderer, &formats[i])) {
continue;
}
- shm_formats[j++] = formats[i].drm_format;
+ wlr_drm_format_set_add(&renderer->data_ptr_texture_formats,
+ formats[i].drm_format, DRM_FORMAT_MOD_LINEAR);
}
- *len = j;
- return shm_formats;
} | 1 | #include <drm_fourcc.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include "render/gles2.h"
/*
* The DRM formats are little endian while the GL formats are big endian,
* so DRM_FORMAT_ARGB8888 is actually compatible with GL_BGRA_EXT.
*/
static const struct wlr_gles2_pixel_format formats[] = {
{
.drm_format = DRM_FORMAT_ARGB8888,
.gl_format = GL_BGRA_EXT,
.gl_type = GL_UNSIGNED_BYTE,
.has_alpha = true,
},
{
.drm_format = DRM_FORMAT_XRGB8888,
.gl_format = GL_BGRA_EXT,
.gl_type = GL_UNSIGNED_BYTE,
.has_alpha = false,
},
{
.drm_format = DRM_FORMAT_XBGR8888,
.gl_format = GL_RGBA,
.gl_type = GL_UNSIGNED_BYTE,
.has_alpha = false,
},
{
.drm_format = DRM_FORMAT_ABGR8888,
.gl_format = GL_RGBA,
.gl_type = GL_UNSIGNED_BYTE,
.has_alpha = true,
},
{
.drm_format = DRM_FORMAT_BGR888,
.gl_format = GL_RGB,
.gl_type = GL_UNSIGNED_BYTE,
.has_alpha = false,
},
#if WLR_LITTLE_ENDIAN
{
.drm_format = DRM_FORMAT_RGBX4444,
.gl_format = GL_RGBA,
.gl_type = GL_UNSIGNED_SHORT_4_4_4_4,
.has_alpha = false,
},
{
.drm_format = DRM_FORMAT_RGBA4444,
.gl_format = GL_RGBA,
.gl_type = GL_UNSIGNED_SHORT_4_4_4_4,
.has_alpha = true,
},
{
.drm_format = DRM_FORMAT_RGBX5551,
.gl_format = GL_RGBA,
.gl_type = GL_UNSIGNED_SHORT_5_5_5_1,
.has_alpha = false,
},
{
.drm_format = DRM_FORMAT_RGBA5551,
.gl_format = GL_RGBA,
.gl_type = GL_UNSIGNED_SHORT_5_5_5_1,
.has_alpha = true,
},
{
.drm_format = DRM_FORMAT_RGB565,
.gl_format = GL_RGB,
.gl_type = GL_UNSIGNED_SHORT_5_6_5,
.has_alpha = false,
},
{
.drm_format = DRM_FORMAT_XBGR2101010,
.gl_format = GL_RGBA,
.gl_type = GL_UNSIGNED_INT_2_10_10_10_REV_EXT,
.has_alpha = false,
},
{
.drm_format = DRM_FORMAT_ABGR2101010,
.gl_format = GL_RGBA,
.gl_type = GL_UNSIGNED_INT_2_10_10_10_REV_EXT,
.has_alpha = true,
},
{
.drm_format = DRM_FORMAT_XBGR16161616F,
.gl_format = GL_RGBA,
.gl_type = GL_HALF_FLOAT_OES,
.has_alpha = false,
},
{
.drm_format = DRM_FORMAT_ABGR16161616F,
.gl_format = GL_RGBA,
.gl_type = GL_HALF_FLOAT_OES,
.has_alpha = true,
},
#endif
};
// TODO: more pixel formats
bool is_gles2_pixel_format_supported(const struct wlr_gles2_renderer *renderer,
const struct wlr_gles2_pixel_format *format) {
if (format->gl_type == GL_UNSIGNED_INT_2_10_10_10_REV_EXT
&& !renderer->exts.EXT_texture_type_2_10_10_10_REV) {
return false;
}
if (format->gl_type == GL_HALF_FLOAT_OES
&& !renderer->exts.OES_texture_half_float_linear) {
return false;
}
if (format->gl_format == GL_BGRA_EXT
&& !renderer->exts.EXT_read_format_bgra) {
return false;
}
return true;
}
const struct wlr_gles2_pixel_format *get_gles2_format_from_drm(uint32_t fmt) {
for (size_t i = 0; i < sizeof(formats) / sizeof(*formats); ++i) {
if (formats[i].drm_format == fmt) {
return &formats[i];
}
}
return NULL;
}
const struct wlr_gles2_pixel_format *get_gles2_format_from_gl(
GLint gl_format, GLint gl_type, bool alpha) {
for (size_t i = 0; i < sizeof(formats) / sizeof(*formats); ++i) {
if (formats[i].gl_format == gl_format &&
formats[i].gl_type == gl_type &&
formats[i].has_alpha == alpha) {
return &formats[i];
}
}
return NULL;
}
const uint32_t *get_gles2_shm_formats(const struct wlr_gles2_renderer *renderer,
size_t *len) {
static uint32_t shm_formats[sizeof(formats) / sizeof(formats[0])];
size_t j = 0;
for (size_t i = 0; i < sizeof(formats) / sizeof(formats[0]); i++) {
if (!is_gles2_pixel_format_supported(renderer, &formats[i])) {
continue;
}
shm_formats[j++] = formats[i].drm_format;
}
*len = j;
return shm_formats;
}
| 1 | 18,397 | Why isnt the return value checked? | swaywm-wlroots | c |
@@ -28,12 +28,15 @@ package util
import (
"fmt"
- "k8s.io/apimachinery/pkg/util/json"
"strings"
+
+ "k8s.io/apimachinery/pkg/util/json"
)
// GetNestedField returns a nested field from the provided map
func GetNestedField(obj map[string]interface{}, fields ...string) interface{} {
+ fmt.Println("obj", obj)
+ fmt.Println("fields", fields)
var val interface{} = obj
for _, field := range fields {
if _, ok := val.(map[string]interface{}); !ok { | 1 | /*
Copyright 2015 The Kubernetes Authors.
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package util provides functions based on k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
// They are copied here to make them exported.
//
// TODO
// Check if it makes sense to import the entire unstructured package of
// k8s.io/apimachinery/pkg/apis/meta/v1/unstructured versus. copying
//
// TODO
// Move to maya/pkg/unstructured/v1alpha1 as helpers.go
package util
import (
"fmt"
"k8s.io/apimachinery/pkg/util/json"
"strings"
)
// GetNestedField returns a nested field from the provided map
func GetNestedField(obj map[string]interface{}, fields ...string) interface{} {
var val interface{} = obj
for _, field := range fields {
if _, ok := val.(map[string]interface{}); !ok {
return nil
}
val = val.(map[string]interface{})[field]
}
return val
}
// GetNestedFieldInto converts a nested field to requested type from the provided map
func GetNestedFieldInto(out interface{}, obj map[string]interface{}, fields ...string) error {
objMap := GetNestedField(obj, fields...)
if objMap == nil {
// If field has no value, leave `out` as is.
return nil
}
// Decode into the requested output type.
data, err := json.Marshal(objMap)
if err != nil {
return fmt.Errorf("can't encode nested field %v: %v", strings.Join(fields, "."), err)
}
if err := json.Unmarshal(data, out); err != nil {
return fmt.Errorf("can't decode nested field %v into type %T: %v", strings.Join(fields, "."), out, err)
}
return nil
}
// GetNestedString returns a nested string from the provided map
func GetNestedString(obj map[string]interface{}, fields ...string) string {
if obj == nil {
return ""
}
if str, ok := GetNestedField(obj, fields...).(string); ok {
return str
}
return ""
}
// GetNestedArray returns an nested array from the provided map
func GetNestedArray(obj map[string]interface{}, fields ...string) []interface{} {
if arr, ok := GetNestedField(obj, fields...).([]interface{}); ok {
return arr
}
return nil
}
// GetNestedInt64 returns an nested int64 from the provided map
func GetNestedInt64(obj map[string]interface{}, fields ...string) int64 {
if str, ok := GetNestedField(obj, fields...).(int64); ok {
return str
}
return 0
}
// GetNestedInt64Pointer returns a nested int64 pointer from the provided map
func GetNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 {
nested := GetNestedField(obj, fields...)
switch n := nested.(type) {
case int64:
return &n
case *int64:
return n
default:
return nil
}
}
// GetNestedSlice returns a nested slice from the provided map
func GetNestedSlice(obj map[string]interface{}, fields ...string) []string {
if m, ok := GetNestedField(obj, fields...).([]interface{}); ok {
strSlice := make([]string, 0, len(m))
for _, v := range m {
if str, ok := v.(string); ok {
strSlice = append(strSlice, str)
}
}
return strSlice
}
return nil
}
// GetNestedMap returns a nested map from the provided map
func GetNestedMap(obj map[string]interface{}, fields ...string) map[string]string {
if m, ok := GetNestedField(obj, fields...).(map[string]interface{}); ok {
strMap := make(map[string]string, len(m))
for k, v := range m {
if str, ok := v.(string); ok {
strMap[k] = str
}
}
return strMap
}
return nil
}
// SetNestedField sets a nested field into the provided map
func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) {
if len(fields) == 0 || obj == nil {
return
}
m := obj
if len(fields) > 1 {
for _, field := range fields[0 : len(fields)-1] {
if _, ok := m[field].(map[string]interface{}); !ok {
m[field] = make(map[string]interface{})
}
m = m[field].(map[string]interface{})
}
}
m[fields[len(fields)-1]] = value
}
// DeleteNestedField deletes a nested field from the provided map
func DeleteNestedField(obj map[string]interface{}, fields ...string) {
if len(fields) == 0 || obj == nil {
return
}
m := obj
if len(fields) > 1 {
for _, field := range fields[0 : len(fields)-1] {
if _, ok := m[field].(map[string]interface{}); !ok {
m[field] = make(map[string]interface{})
}
m = m[field].(map[string]interface{})
}
}
delete(m, fields[len(fields)-1])
}
// SetNestedSlice sets a nested slice from the provided map
func SetNestedSlice(obj map[string]interface{}, value []string, fields ...string) {
m := make([]interface{}, 0, len(value))
for _, v := range value {
m = append(m, v)
}
SetNestedField(obj, m, fields...)
}
// SetNestedMap sets a nested map from the provided map
func SetNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) {
m := make(map[string]interface{}, len(value))
for k, v := range value {
m[k] = v
}
SetNestedField(obj, m, fields...)
}
// MergeMapOfStrings will merge the map from src to dest
func MergeMapOfStrings(dest map[string]string, src map[string]string) bool {
// nil check as storing into a nil map panics
if dest == nil {
return false
}
for k, v := range src {
dest[k] = v
}
return true
}
// MergeMapOfObjects will merge the map from src to dest. It will override
// existing keys of the destination
func MergeMapOfObjects(dest map[string]interface{}, src map[string]interface{}) bool {
// nil check as storing into a nil map panics
if dest == nil {
return false
}
for k, v := range src {
dest[k] = v
}
return true
}
// GetMapOfStrings gets the direct value from the passed obj & the field path
// The value returned should be expected of the form map[string]string
func GetMapOfStrings(obj map[string]interface{}, field string) map[string]string {
if m, ok := obj[field].(map[string]string); ok {
return m
}
return nil
}
| 1 | 18,560 | remove this debug | openebs-maya | go |
@@ -286,10 +286,13 @@ auto data_type_layer<TensorDataType>::get_local_error_signals(int parent_index)
// Accessing matrices corresponding to parent/child layer
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_activations(const Layer& child) const -> const BaseDistMat& {
- const int child_index = (std::find(m_child_layers.begin(),
- m_child_layers.end(),
- &child)
- - m_child_layers.begin());
+ if(m_child_layers.empty()) {
+ LBANN_ERROR("This layer has no children");
+ }
+ const int child_index = std::distance(m_child_layers.begin(),
+ std::find(m_child_layers.begin(),
+ m_child_layers.end(),
+ &child));
if (child_index >= get_num_children()) {
std::stringstream err;
err << "attempted to get activation tensor of " | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_DATA_TYPE_LAYER_INSTANTIATE
#include "lbann/layers/data_type_layer.hpp"
#include "lbann/models/model.hpp"
#include "lbann/execution_contexts/sgd_execution_context.hpp"
namespace lbann {
template <typename TensorDataType>
data_type_layer<TensorDataType>::data_type_layer(const data_type_layer<TensorDataType>& other) :
Layer(other),
m_weights(other.m_weights) {
// Deep matrix copies
m_inputs.reserve(other.m_inputs.size());
m_outputs.reserve(other.m_outputs.size());
m_gradient_wrt_outputs.reserve(other.m_gradient_wrt_outputs.size());
m_gradient_wrt_inputs.reserve(other.m_gradient_wrt_inputs.size());
for (const auto& ptr : other.m_inputs) {
m_inputs.emplace_back(ptr ? ptr->Copy() : nullptr);
}
for (const auto& ptr : other.m_outputs) {
m_outputs.emplace_back(ptr ? ptr->Copy() : nullptr);
}
for (const auto& ptr : other.m_gradient_wrt_outputs) {
m_gradient_wrt_outputs.emplace_back(ptr ? ptr->Copy() : nullptr);
}
for (const auto& ptr : other.m_gradient_wrt_inputs) {
m_gradient_wrt_inputs.emplace_back(ptr ? ptr->Copy() : nullptr);
}
}
template <typename TensorDataType>
data_type_layer<TensorDataType>& data_type_layer<TensorDataType>::operator=(const data_type_layer<TensorDataType>& other) {
Layer::operator=(other);
// Shallow copies
m_weights = other.m_weights;
// Deep matrix copies
m_inputs.clear();
m_outputs.clear();
m_gradient_wrt_outputs.clear();
m_gradient_wrt_inputs.clear();
m_inputs.reserve(other.m_inputs.size());
m_outputs.reserve(other.m_outputs.size());
m_gradient_wrt_outputs.reserve(other.m_gradient_wrt_outputs.size());
m_gradient_wrt_inputs.reserve(other.m_gradient_wrt_inputs.size());
for (const auto& ptr : other.m_inputs) {
m_inputs.emplace_back(ptr ? ptr->Copy() : nullptr);
}
for (const auto& ptr : other.m_outputs) {
m_outputs.emplace_back(ptr ? ptr->Copy() : nullptr);
}
for (const auto& ptr : other.m_gradient_wrt_outputs) {
m_gradient_wrt_outputs.emplace_back(ptr ? ptr->Copy() : nullptr);
}
for (const auto& ptr : other.m_gradient_wrt_inputs) {
m_gradient_wrt_inputs.emplace_back(ptr ? ptr->Copy() : nullptr);
}
return *this;
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::forward_prop() {
const auto fp_start = get_time();
// Setup tensors
const auto& c = static_cast<sgd_execution_context&>(m_model->get_execution_context());
const auto& mini_batch_size = c.get_current_mini_batch_size();
fp_setup_inputs(mini_batch_size);
fp_setup_outputs(mini_batch_size);
#if defined(LBANN_HAS_GPU) && defined(LBANN_DEBUG)
// Synchronize GPUs and check for errors
if (using_gpus()) { El::GPUManager::SynchronizeDevice(true); }
#endif // defined(LBANN_HAS_GPU) && defined(LBANN_DEBUG)
// Apply layer's compute function
const auto fp_compute_start = get_time();
fp_compute();
m_fp_compute_time += get_time() - fp_compute_start;
// Add this layer as a gradient source for weight optimizers
for (auto&& w : get_data_type_weights()) {
optimizer* opt = w->get_optimizer();
if (opt != nullptr) { opt->add_gradient_source(this); }
}
#if defined(LBANN_HAS_GPU) && defined(LBANN_DEBUG)
// Synchronize GPUs and check for errors
if (using_gpus()) { El::GPUManager::SynchronizeDevice(true); }
#endif // defined(LBANN_HAS_GPU) && defined(LBANN_DEBUG)
m_fp_time += get_time() - fp_start;
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::back_prop() {
const auto bp_start = get_time();
// Setup tensors
const auto& c = static_cast<sgd_execution_context&>(m_model->get_execution_context());
const auto& mini_batch_size = c.get_current_mini_batch_size();
bp_setup_gradient_wrt_outputs(mini_batch_size);
bp_setup_gradient_wrt_inputs(mini_batch_size);
#if defined(LBANN_HAS_GPU) && defined(LBANN_DEBUG)
// Synchronize GPUs and check for errors
if (using_gpus()) { El::GPUManager::SynchronizeDevice(true); }
#endif // defined(LBANN_HAS_GPU) && defined(LBANN_DEBUG)
// Backprop the compute function.
const auto bp_compute_start = get_time();
bp_compute();
m_bp_compute_time += get_time() - bp_compute_start;
// Remove this layer as a gradient source for weight optimizers
for (auto&& w : get_data_type_weights()) {
auto&& opt = w->get_optimizer();
if (opt != nullptr) { opt->remove_gradient_source(this); }
}
#if defined(LBANN_HAS_GPU) && defined(LBANN_DEBUG)
// Synchronize GPUs and check for errors
if (using_gpus()) { El::GPUManager::SynchronizeDevice(true); }
#endif // defined(LBANN_HAS_GPU) && defined(LBANN_DEBUG)
m_bp_time += get_time() - bp_start;
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::summarize_matrices(lbann_summary& summarizer, int step) {
// Summarize activation matrices
const int num_children = get_num_children();
for (int i = 0; i < num_children; ++i) {
AbsDistMatReadProxyType<El::Device::CPU> acts(*m_outputs[i]);
std::string prefix = m_name + "/activations";
if (num_children > 1) { prefix += std::to_string(i); }
summarizer.reduce_mean(prefix + "/mean", acts.GetLocked(), step);
summarizer.reduce_min(prefix + "/min", acts.GetLocked(), step);
summarizer.reduce_max(prefix + "/max", acts.GetLocked(), step);
summarizer.reduce_stdev(prefix + "/stdev", acts.GetLocked(), step);
summarizer.reduce_2norm(prefix + "/2norm2", acts.GetLocked(), step);
}
// Summarize error signal matrices
const int num_parents = get_num_parents();
for (int i = 0; i < num_parents; ++i) {
AbsDistMatReadProxyType<El::Device::CPU> error_signals(*m_gradient_wrt_inputs[i]);
std::string prefix = m_name + "/error_signals";
if (num_parents > 1) { prefix += std::to_string(i); }
summarizer.reduce_mean(prefix + "/mean", error_signals.GetLocked(), step);
summarizer.reduce_min(prefix + "/min", error_signals.GetLocked(), step);
summarizer.reduce_max(prefix + "/max", error_signals.GetLocked(), step);
summarizer.reduce_stdev(prefix + "/stdev", error_signals.GetLocked(), step);
summarizer.reduce_2norm(prefix + "/2norm2", error_signals.GetLocked(), step);
}
}
// ===================================================================
// Tensor access functions
// ===================================================================
// Accessing distributed matrices
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_prev_activations(int parent_index) const -> const AbsDistMatrixType& {
if (parent_index < 0 || parent_index >= (int) m_inputs.size()) {
std::stringstream err;
err << "attempted to access invalid previous activation matrix "
<< "from " << m_name << " "
<< "(requested index " << parent_index << ", but there are "
<< m_inputs.size() << " previous activation matrices)";
LBANN_ERROR(err.str());
}
return *m_inputs[parent_index];
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_activations(int child_index) const -> const AbsDistMatrixType& {
if (child_index < 0 || child_index >= (int) m_outputs.size()) {
std::stringstream err;
err << "attempted to access invalid activation matrix "
<< "from " << m_name << " "
<< "(requested index " << child_index << ", but there are "
<< m_outputs.size() << " activation matrices)";
LBANN_ERROR(err.str());
}
return *m_outputs[child_index];
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_prev_error_signals(int child_index) const -> const AbsDistMatrixType& {
if (child_index < 0 || child_index >= (int) m_gradient_wrt_outputs.size()) {
std::stringstream err;
err << "attempted to access invalid previous error signal matrix "
<< "from " << m_name << " "
<< "(requested index " << child_index << ", but there are "
<< m_gradient_wrt_outputs.size() << " previous error signal matrices)";
LBANN_ERROR(err.str());
}
return *m_gradient_wrt_outputs[child_index];
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_error_signals(int parent_index) const -> const AbsDistMatrixType& {
if (parent_index < 0 || parent_index >= (int) m_gradient_wrt_inputs.size()) {
std::stringstream err;
err << "attempted to access invalid error signal matrix "
<< "from " << m_name << " "
<< "(requested index " << parent_index << ", but there are "
<< m_gradient_wrt_inputs.size() << " error signal matrices)";
LBANN_ERROR(err.str());
}
return *m_gradient_wrt_inputs[parent_index];
}
// Accessing non-const distributed matrices
// Note: Using idiom from Item 3, p. 23 in "Effective C++", 3rd ed.,
// by Scott Meyers.
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_activations(int child_index) -> AbsDistMatrixType& {
return const_cast<AbsDistMatrixType&>(static_cast<const data_type_layer<TensorDataType>&>(*this).get_activations(child_index));
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_error_signals(int parent_index) -> AbsDistMatrixType& {
return const_cast<AbsDistMatrixType&>(static_cast<const data_type_layer<TensorDataType>&>(*this).get_error_signals(parent_index));
}
// Accessing local matrices
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_local_activations(int child_index) -> AbsMatrixType& {
return get_activations(child_index).Matrix();
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_local_error_signals(int parent_index) -> AbsMatrixType& {
return get_error_signals(parent_index).Matrix();
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_local_prev_activations(int parent_index) const -> const AbsMatrixType&{
return get_prev_activations(parent_index).LockedMatrix();
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_local_activations(int child_index) const -> const AbsMatrixType& {
return get_activations(child_index).LockedMatrix();
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_local_prev_error_signals(int child_index) const -> const AbsMatrixType& {
return get_prev_error_signals(child_index).LockedMatrix();
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_local_error_signals(int parent_index) const -> const AbsMatrixType& {
return get_error_signals(parent_index).LockedMatrix();
}
// Accessing matrices corresponding to parent/child layer
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_activations(const Layer& child) const -> const BaseDistMat& {
const int child_index = (std::find(m_child_layers.begin(),
m_child_layers.end(),
&child)
- m_child_layers.begin());
if (child_index >= get_num_children()) {
std::stringstream err;
err << "attempted to get activation tensor of "
<< "layer \"" << get_name() << "\" "
<< "corresponding to layer\"" << child.get_name() << "\", "
<< "which is not a child layer";
LBANN_ERROR(err.str());
}
return get_activations(child_index);
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::get_error_signals(const Layer& parent) const -> const BaseDistMat& {
const int parent_index = (std::find(m_parent_layers.begin(),
m_parent_layers.end(),
&parent)
- m_parent_layers.begin());
if (parent_index >= get_num_parents()) {
std::stringstream err;
err << "attempted to get error signal tensor of "
<< "layer \"" << get_name() << "\" "
<< "corresponding to layer\"" << parent.get_name() << "\", "
<< "which is not a parent layer";
LBANN_ERROR(err.str());
}
return get_error_signals(parent_index);
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::setup_matrices(const El::Grid& grid) {
// Destroy previously setup matrices
m_inputs.clear();
m_outputs.clear();
m_gradient_wrt_outputs.clear();
m_gradient_wrt_inputs.clear();
// Construct matrices
m_inputs.resize(get_num_parents());
m_outputs.resize(get_num_children());
m_gradient_wrt_outputs.resize(get_num_children());
m_gradient_wrt_inputs.resize(get_num_parents());
for (int i = 0; i < get_num_parents(); ++i) {
m_inputs[i] = construct_matrix(grid, "input", i);
}
for (int i = 0; i < get_num_children(); ++i) {
m_outputs[i] = construct_matrix(grid, "output", i);
}
for (int i = 0; i < get_num_children(); ++i) {
m_gradient_wrt_outputs[i]
= construct_matrix(grid, "gradient_wrt_output", i);
}
for (int i = 0; i < get_num_parents(); ++i) {
m_gradient_wrt_inputs[i]
= construct_matrix(grid, "gradient_wrt_input", i);
}
}
template <typename TensorDataType>
auto data_type_layer<TensorDataType>::construct_matrix(const El::Grid& grid,
std::string type,
El::Int index) -> std::unique_ptr<AbsDistMatrixType> {
// Choose matrix distribution
El::Distribution col_dist, row_dist;
El::DistWrap wrap;
El::Device device = this->get_device_allocation();
switch (get_data_layout()) {
case data_layout::DATA_PARALLEL:
col_dist = El::STAR;
row_dist = El::VC;
wrap = El::ELEMENT;
break;
case data_layout::MODEL_PARALLEL:
col_dist = El::MC;
row_dist = El::MR;
wrap = El::ELEMENT;
break;
default: LBANN_ERROR("invalid data layout");
}
// Construct matrix
std::unique_ptr<AbsDistMatrixType> mat;
mat.reset(AbsDistMatrixType::Instantiate(grid, 0,
col_dist, row_dist, wrap, device));
#ifdef LBANN_HAS_GPU
// Allocate GPU memory with the CUDA API
if (device == El::Device::GPU) { mat->Matrix().SetMemoryMode(0); }
// Use pinned memory for data on the host.
if (device == El::Device::CPU) { mat->Matrix().SetMemoryMode(1); }
#endif // LBANN_HAS_GPU
return mat;
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::setup_data() {
Layer::setup_data();
// Get mini-batch size
const auto& mini_batch_size = m_model->get_max_mini_batch_size();
// Initialize input and output tensors
fp_setup_inputs(mini_batch_size);
fp_setup_outputs(mini_batch_size);
// Initialize gradient w.r.t. output tensors
// Note: We guess whether the tensor is a view or needs to allocate
// memory, but there are some edge cases that are not handled.
for (int i = 0; i < get_num_children(); ++i) {
const auto& child = *m_child_layers[i];
const auto& output = get_activations(i);
auto& gradient_wrt_output = *m_gradient_wrt_outputs[i];
gradient_wrt_output.Empty(false);
gradient_wrt_output.AlignWith(output);
if (child.get_data_layout() == get_data_layout()
&& child.get_device_allocation() == get_device_allocation()
&& gradient_wrt_output.DistData() == output.DistData()) {
El::LockedView(gradient_wrt_output, output);
} else {
El::Copy(output, gradient_wrt_output);
}
}
// Initialize gradient w.r.t. input tensors
bp_setup_gradient_wrt_inputs(mini_batch_size);
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::bp_compute() {
for (int i = 0; i < get_num_parents(); ++i) {
El::Zero(get_error_signals(i));
}
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::check_setup() {
Layer::check_setup();
std::stringstream err;
// Check number of tensors
const int num_parents = get_num_parents();
const int num_children = get_num_children();
if ((int) m_inputs.size() != num_parents
|| (int) m_outputs.size() != num_children
|| (int) m_gradient_wrt_outputs.size() != num_children
|| (int) m_gradient_wrt_inputs.size() != num_parents) {
err << "layer \"" << get_name() << "\" has an "
<< "invalid number of input and output tensors "
<< "(found " << num_parents << " parent layers, "
<< num_children << " child layers, "
<< m_inputs.size() << " input tensors, "
<< m_outputs.size() << " output tensors, "
<< m_gradient_wrt_outputs.size() << " gradient w.r.t. output tensors, "
<< m_gradient_wrt_inputs.size() << " gradient w.r.t. input tensors)";
LBANN_ERROR(err.str());
}
// Check that tensors are initialized
for (int i = 0; i < get_num_parents(); ++i) {
if (m_inputs[i] == nullptr) {
err << "layer \"" << get_name() << "\" has an "
<< "uninitialized input tensor (index " << i << ")";
LBANN_ERROR(err.str());
}
}
for (int i = 0; i < get_num_children(); ++i) {
if (m_outputs[i] == nullptr) {
err << "layer \"" << get_name() << "\" has an "
<< "uninitialized output tensor (index " << i << ")";
LBANN_ERROR(err.str());
}
}
for (int i = 0; i < get_num_children(); ++i) {
if (m_gradient_wrt_outputs[i] == nullptr) {
err << "layer \"" << get_name() << "\" has an "
<< "uninitialized gradient w.r.t. output tensor "
<< "(index " << i << ")";
LBANN_ERROR(err.str());
}
}
for (int i = 0; i < get_num_parents(); ++i) {
if (m_gradient_wrt_inputs[i] == nullptr) {
err << "layer \"" << get_name() << "\" has an "
<< "uninitialized gradient w.r.t. input tensor "
<< "(index " << i << ")";
LBANN_ERROR(err.str());
}
}
}
// ===========================================================
// Weights access functions
// ===========================================================
template <typename TensorDataType>
void data_type_layer<TensorDataType>::replace_weights(Layer* other_layer) {
if (other_layer == nullptr) {
LBANN_ERROR("attempted to add null pointer as a replacement layer");
}
const std::vector<WeightsType*>& other_layer_weights =
dynamic_cast<data_type_layer<TensorDataType>*>(other_layer)->get_data_type_weights();
for (size_t i = 0; i < m_weights.size(); ++i) {
m_weights[i]->set_values(other_layer_weights[i]->get_values());
}
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::fp_setup_inputs(El::Int mini_batch_size) {
if (get_num_parents() < 1) { return; }
// Determine distributed matrix alignment
const auto& alignment_dist = m_parent_layers.front()->get_activations(*this).DistData();
// Iterate through input tensors
for (int i = 0; i < get_num_parents(); ++i) {
// Initialize input tensor
const auto& parent = *m_parent_layers[i];
const auto& parent_output = parent.get_activations(*this);
auto& input = *m_inputs[i];
input.Empty(false);
input.AlignWith(alignment_dist);
if (parent_output.DistData() == input.DistData()) {
El::LockedView(input, dynamic_cast<const AbsDistMatrixType&>(parent_output));
} else {
bool async_copy = false;
#if defined(LBANN_HAS_GPU) && defined(ASYNC_INPUT_MEMORY_TRANSFER)
// Asynchronously copy CPU data to GPU data if they are otherwise aligned
if (parent_output.GetLocalDevice() == El::Device::CPU
&& input.GetLocalDevice() == El::Device::GPU) {
auto parent_dist_data = parent_output.DistData();
parent_dist_data.device = El::Device::GPU;
async_copy = parent_dist_data == input.DistData();
}
#endif // defined(LBANN_HAS_GPU) && defined(ASYNC_INPUT_MEMORY_TRANSFER)
if (async_copy) {
El::CopyAsync(parent_output, input);
} else {
El::Copy(parent_output, input);
}
}
// Check input matrix dimensions
const auto& height = get_input_size(i);
const auto& width = mini_batch_size;
if (input.Height() != height || input.Width() != width) {
std::stringstream err;
err << "layer \"" << get_name() << "\" "
<< "expected an input tensor stored in a "
<< height << " x " << width << " matrix "
<< "from layer \"" << parent.get_name() << "\", but got a "
<< input.Height() << " x " << input.Width() << " matrix";
LBANN_ERROR(err.str());
}
}
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::fp_setup_outputs(El::Int mini_batch_size) {
if (get_num_children() < 1) { return; }
// Determine distributed matrix alignment
const bool align_outputs = get_num_parents() > 0;
const auto& alignment_dist = (align_outputs ?
get_prev_activations().DistData() :
get_activations().DistData());
// Initialize output tensors
for (int i = 0; i < get_num_children(); ++i) {
auto& output = get_activations(i);
output.Empty(false);
if (align_outputs) { output.AlignWith(alignment_dist); }
output.Resize(get_output_size(i), mini_batch_size);
}
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::bp_setup_gradient_wrt_outputs(El::Int mini_batch_size) {
for (int i = 0; i < get_num_children(); ++i) {
// Initialize gradient w.r.t. output tensor
const auto& child = *m_child_layers[i];
const auto& child_gradient_wrt_input = child.get_error_signals(*this);
auto& gradient_wrt_output = *m_gradient_wrt_outputs[i];
gradient_wrt_output.Empty(false);
gradient_wrt_output.AlignWith(get_activations(i));
if (child_gradient_wrt_input.DistData()
== gradient_wrt_output.DistData()) {
El::LockedView(gradient_wrt_output, dynamic_cast<const AbsDistMatrixType&>(child_gradient_wrt_input));
} else {
bool async_copy = false;
#if defined(LBANN_HAS_GPU) && defined(ASYNC_INPUT_MEMORY_TRANSFER)
// Asynchronously copy CPU data to GPU data if they are otherwise aligned
if (child_gradient_wrt_input.GetLocalDevice() == El::Device::CPU
&& gradient_wrt_output.GetLocalDevice() == El::Device::GPU) {
auto child_dist_data = child_gradient_wrt_input.DistData();
child_dist_data.device = El::Device::GPU;
async_copy = child_dist_data == gradient_wrt_output.DistData();
}
#endif // defined(LBANN_HAS_GPU) && defined(ASYNC_INPUT_MEMORY_TRANSFER)
if (async_copy) {
El::CopyAsync(child_gradient_wrt_input, gradient_wrt_output);
} else {
El::Copy(child_gradient_wrt_input, gradient_wrt_output);
}
}
// Check gradient w.r.t. output matrix dimensions
const auto& height = get_output_size(i);
const auto& width = mini_batch_size;
if (gradient_wrt_output.Height() != height
|| gradient_wrt_output.Width() != width) {
std::stringstream err;
err << "layer \"" << get_name() << "\" "
<< "expected a gradient w.r.t. output tensor stored in a "
<< height << " x " << width << " matrix "
<< "from layer \"" << child.get_name() << "\", but got a "
<< gradient_wrt_output.Height() << " x "
<< gradient_wrt_output.Width() << " matrix";
LBANN_ERROR(err.str());
}
}
}
template <typename TensorDataType>
void data_type_layer<TensorDataType>::bp_setup_gradient_wrt_inputs(El::Int mini_batch_size) {
for (int i = 0; i < get_num_parents(); ++i) {
auto& gradient_wrt_input = get_error_signals(i);
gradient_wrt_input.Empty(false);
gradient_wrt_input.AlignWith(get_prev_activations(i));
gradient_wrt_input.Resize(get_input_size(i), mini_batch_size);
}
}
#define PROTO(T) \
template class data_type_layer<T>
#define LBANN_INSTANTIATE_CPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| 1 | 15,056 | This should change to a call to `Layer::find_layer_index`, which should be renamed to `find_child_layer_index` and it should return a `size_t` (technically the `difference_type` for whatever iterators are going to be used). | LLNL-lbann | cpp |
@@ -114,7 +114,7 @@ class py2exe(build_exe.py2exe):
def build_manifest(self, target, template):
mfest, rid = build_exe.py2exe.build_manifest(self, target, template)
- if getattr(target, "script", None) == "nvda.pyw":
+ if getattr(target, "script", "").endswith(".pyw"):
# This is one of the main application executables.
mfest = mfest[:mfest.rindex("</assembly>")]
mfest += MAIN_MANIFEST_EXTRA + "</assembly>" | 1 | # -*- coding: UTF-8 -*-
#setup.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Joseph Lee
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import os
import copy
import gettext
gettext.install("nvda", unicode=True)
from distutils.core import setup
import py2exe as py2exeModule
from glob import glob
import fnmatch
from versionInfo import *
from py2exe import build_exe
import wx
import imp
MAIN_MANIFEST_EXTRA = r"""
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.dll">
<comClass
description="HtBrailleDriver Class"
clsid="{209445BA-92ED-4AB2-83EC-F24ACEE77EE0}"
threadingModel="Apartment"
progid="HtBrailleDriverServer.HtBrailleDriver"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
</file>
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.tlb">
<typelib tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}"
version="1.0"
helpdir="" />
</file>
<comInterfaceExternalProxyStub
name="IHtBrailleDriverSink"
iid="{EF551F82-1C7E-421F-963D-D9D03548785A}"
proxyStubClsid32="{00020420-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<comInterfaceExternalProxyStub
name="IHtBrailleDriver"
iid="{43A71F9B-58EE-42D4-B58E-0F9FBA28D995}"
proxyStubClsid32="{00020424-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows Vista -->
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
<!-- Windows 7 -->
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
<!-- Windows 8 -->
<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
<!-- Windows 8.1 -->
<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
"""
def getModuleExtention(thisModType):
for ext,mode,modType in imp.get_suffixes():
if modType==thisModType:
return ext
raise ValueError("unknown mod type %s"%thisModType)
# py2exe's idea of whether a dll is a system dll appears to be wrong sometimes, so monkey patch it.
origIsSystemDLL = build_exe.isSystemDLL
def isSystemDLL(pathname):
dll = os.path.basename(pathname).lower()
if dll in ("msvcp71.dll", "msvcp90.dll", "gdiplus.dll","mfc71.dll", "mfc90.dll"):
# These dlls don't exist on many systems, so make sure they're included.
return 0
elif dll.startswith("api-ms-win-") or dll in ("powrprof.dll", "mpr.dll", "crypt32.dll"):
# These are definitely system dlls available on all systems and must be excluded.
# Including them can cause serious problems when a binary build is run on a different version of Windows.
return 1
return origIsSystemDLL(pathname)
build_exe.isSystemDLL = isSystemDLL
class py2exe(build_exe.py2exe):
"""Overridden py2exe command to:
* Add a command line option --enable-uiAccess to enable uiAccess for the main executable
* Add extra info to the manifest
* Don't copy w9xpopen, as NVDA will never run on Win9x
"""
user_options = build_exe.py2exe.user_options + [
("enable-uiAccess", "u", "enable uiAccess for the main executable"),
]
def initialize_options(self):
build_exe.py2exe.initialize_options(self)
self.enable_uiAccess = False
def copy_w9xpopen(self, modules, dlls):
pass
def run(self):
dist = self.distribution
if self.enable_uiAccess:
# Add a target for nvda_uiAccess, using nvda_noUIAccess as a base.
target = copy.deepcopy(dist.windows[0])
target["dest_base"] = "nvda_uiAccess"
target["uac_info"] = (target["uac_info"][0], True)
dist.windows.insert(1, target)
# nvda_eoaProxy should have uiAccess.
target = dist.windows[3]
target["uac_info"] = (target["uac_info"][0], True)
build_exe.py2exe.run(self)
def build_manifest(self, target, template):
mfest, rid = build_exe.py2exe.build_manifest(self, target, template)
if getattr(target, "script", None) == "nvda.pyw":
# This is one of the main application executables.
mfest = mfest[:mfest.rindex("</assembly>")]
mfest += MAIN_MANIFEST_EXTRA + "</assembly>"
return mfest, rid
def getLocaleDataFiles():
wxDir=wx.__path__[0]
localeMoFiles=set()
for f in glob("locale/*/LC_MESSAGES"):
localeMoFiles.add((f, (os.path.join(f,"nvda.mo"),)))
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
lang=os.path.split(os.path.split(f)[0])[1]
if '_' in lang:
lang=lang.split('_')[0]
f=os.path.join('locale',lang,'lc_messages')
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
localeDicFiles=[(os.path.dirname(f), (f,)) for f in glob("locale/*/*.dic")]
NVDALocaleGestureMaps=[(os.path.dirname(f), (f,)) for f in glob("locale/*/gestures.ini")]
return list(localeMoFiles)+localeDicFiles+NVDALocaleGestureMaps
def getRecursiveDataFiles(dest,source,excludes=()):
rulesList=[]
rulesList.append((dest,
[f for f in glob("%s/*"%source) if not any(fnmatch.fnmatch(f,exclude) for exclude in excludes) and os.path.isfile(f)]))
[rulesList.extend(getRecursiveDataFiles(os.path.join(dest,dirName),os.path.join(source,dirName),excludes=excludes)) for dirName in os.listdir(source) if os.path.isdir(os.path.join(source,dirName)) and not dirName.startswith('.')]
return rulesList
compiledModExtention = getModuleExtention(imp.PY_COMPILED)
sourceModExtention = getModuleExtention(imp.PY_SOURCE)
setup(
name = name,
version=version,
description=description,
url=url,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Win32 (MS Windows)',
'Topic :: Adaptive Technologies'
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Programming Language :: Python',
'Operating System :: Microsoft :: Windows',
],
cmdclass={"py2exe": py2exe},
windows=[
{
"script":"nvda.pyw",
"dest_base":"nvda_noUIAccess",
"uac_info": ("asInvoker", False),
"icon_resources":[(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description":"NVDA application",
"product_version":version,
"copyright":copyright,
"company_name":publisher,
},
# The nvda_uiAccess target will be added at runtime if required.
{
"script": "nvda_slave.pyw",
"icon_resources": [(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description": name,
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
{
"script": "nvda_eoaProxy.pyw",
# uiAccess will be enabled at runtime if appropriate.
"uac_info": ("asInvoker", False),
"icon_resources": [(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description": "NVDA Ease of Access proxy",
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
],
options = {"py2exe": {
"bundle_files": 3,
"excludes": ["Tkinter",
"serial.loopback_connection", "serial.rfc2217", "serial.serialcli", "serial.serialjava", "serial.serialposix", "serial.socket_connection"],
"packages": ["NVDAObjects","virtualBuffers","appModules","comInterfaces","brailleDisplayDrivers","synthDrivers"],
# #3368: bisect was implicitly included with Python 2.7.3, but isn't with 2.7.5.
# Also, the service executable used win32api, which some add-ons use for various purposes.
# Explicitly include them so we don't break some add-ons.
"includes": ["nvdaBuiltin", "bisect", "win32api"],
}},
data_files=[
(".",glob("*.dll")+glob("*.manifest")+["builtin.dic"]),
("documentation", ['../copying.txt', '../contributors.txt']),
("lib/%s"%version, glob("lib/*.dll")),
("lib64/%s"%version, glob("lib64/*.dll") + glob("lib64/*.exe")),
("waves", glob("waves/*.wav")),
("images", glob("images/*.ico")),
("louis/tables",glob("louis/tables/*")),
(".", ['message.html' ])
] + (
getLocaleDataFiles()
+ getRecursiveDataFiles("synthDrivers", "synthDrivers",
excludes=("*%s" % sourceModExtention, "*%s" % compiledModExtention, "*.exp", "*.lib", "*.pdb"))
+ getRecursiveDataFiles("brailleDisplayDrivers", "brailleDisplayDrivers", excludes=("*%s"%sourceModExtention,"*%s"%compiledModExtention))
+ getRecursiveDataFiles('documentation', '../user_docs', excludes=('*.t2t', '*.t2tconf', '*/developerGuide.*'))
),
)
| 1 | 21,812 | what is this change about? | nvaccess-nvda | py |